diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ad6db0ff84..83a2594551 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -60,6 +60,8 @@ Trunk (unreleased changes)
     HADOOP-7688. Add servlet handler check in HttpServer.start().
     (Uma Maheswara Rao G via szetszwo)
 
+    HADOOP-7590. Mavenize streaming and MR examples. (tucu)
+
   BUGS
 
     HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
@@ -93,6 +95,9 @@ Trunk (unreleased changes)
     HADOOP-7770. ViewFS getFileChecksum throws FileNotFoundException for files in
     /tmp and /user. (Ravi Prakash via jitendra)
 
+    HADOOP-7833. Fix findbugs warnings in protobuf generated code.
+    (John Lee via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -123,6 +128,9 @@ Release 0.23.1 - Unreleased
     HADOOP-7787. Make source tarball use conventional name.
     (Bruno Mahé via tomwhite)
 
+    HADOOP-6614. RunJar should provide more diags when it can't create
+    a temp file. (Jonathan Hsieh via eli)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
@@ -1356,6 +1364,8 @@ Release 0.22.0 - Unreleased
     HADOOP-7457. Remove out-of-date Chinese language documentation. (Jakob
     Homan via eli)
 
+    HADOOP-7783. Add more symlink tests that cover intermediate links. (eli)
+
 Release 0.21.1 - Unreleased
 
   IMPROVEMENTS
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 3e2d43e67e..190677248e 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -272,6 +272,6 @@
-
+
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index f4632f30ae..79419507ba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -385,7 +385,7 @@ public String getUriPath(final Path p) {
     checkPath(p);
     String s = p.toUri().getPath();
     if (!isValidName(s)) {
-      throw new InvalidPathException("Path part " + s + " from URI" + p
+      throw new InvalidPathException("Path part " + s + " from URI " + p
           + " is not a valid filename.");
     }
     return s;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 8b20651f0d..5a14f82a67 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1092,29 +1092,28 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
    * Return a fully qualified version of the given symlink target if it
    * has no scheme and authority. Partially and fully qualified paths
    * are returned unmodified.
-   * @param linkFS The AbstractFileSystem of link
-   * @param link The path of the symlink
-   * @param target The symlink's target
+   * @param pathFS The AbstractFileSystem of the path
+   * @param pathWithLink Path that contains the symlink
+   * @param target The symlink's absolute target
    * @return Fully qualified version of the target.
    */
-  private Path qualifySymlinkTarget(final AbstractFileSystem linkFS,
-    Path link, Path target) {
-    /* NB: makeQualified uses link's scheme/authority, if specified,
-     * and the scheme/authority of linkFS, if not. If link does have
-     * a scheme and authority they should match those of linkFS since
-     * resolve updates the path and file system of a path that contains
-     * links each time a link is encountered.
+  private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
+    Path pathWithLink, Path target) {
+    /* NB: makeQualified uses the target's scheme and authority, if
+     * specified, and the scheme and authority of pathFS, if not. If
+     * the path does have a scheme and authority we assert they match
+     * those of pathFS since resolve updates the file system of a path
+     * that contains links each time a link is encountered.
      */
-    final String linkScheme = link.toUri().getScheme();
-    final String linkAuth = link.toUri().getAuthority();
-    if (linkScheme != null && linkAuth != null) {
-      assert linkScheme.equals(linkFS.getUri().getScheme());
-      assert linkAuth.equals(linkFS.getUri().getAuthority());
+    final String scheme = target.toUri().getScheme();
+    final String auth = target.toUri().getAuthority();
+    if (scheme != null && auth != null) {
+      assert scheme.equals(pathFS.getUri().getScheme());
+      assert auth.equals(pathFS.getUri().getAuthority());
     }
-    final boolean justPath = (target.toUri().getScheme() == null &&
-      target.toUri().getAuthority() == null);
-    return justPath ? target.makeQualified(linkFS.getUri(), link.getParent())
-      : target;
+    return (scheme == null && auth == null)
+      ? target.makeQualified(pathFS.getUri(), pathWithLink.getParent())
+      : target;
   }
 
   /**
@@ -1148,16 +1147,19 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
   }
 
   /**
-   * Returns the un-interpreted target of the given symbolic link.
-   * Transparently resolves all links up to the final path component.
-   * @param f
+   * Returns the target of the given symbolic link as it was specified
+   * when the link was created. Links in the path leading up to the
+   * final path component are resolved transparently.
+   *
+   * @param f the path to return the target of
    * @return The un-interpreted target of the symbolic link.
    *
    * @throws AccessControlException If access is denied
    * @throws FileNotFoundException If path f does not exist
    * @throws UnsupportedFileSystemException If file system for f is
    *         not supported
-   * @throws IOException If an I/O error occurred
+   * @throws IOException If the given path does not refer to a symlink
+   *         or an I/O error occurred
    */
   public Path getLinkTarget(final Path f) throws AccessControlException,
     FileNotFoundException, UnsupportedFileSystemException, IOException {
@@ -1277,7 +1279,7 @@ public FsStatus next(final AbstractFileSystem fs, final Path p)
    * getFsStatus, getFileStatus, exists, and listStatus.
    *
    * Symlink targets are stored as given to createSymlink, assuming the
-   * underlying file system is capable of storign a fully qualified URI.
+   * underlying file system is capable of storing a fully qualified URI.
    * Dangling symlinks are permitted. FileContext supports four types of
    * symlink targets, and resolves them as follows
    *
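Note on the qualification logic above: the same step can be exercised through the public Path API. A minimal, standalone sketch (not part of the patch; the namenode address and paths are made up) of how a bare symlink target picks up the file system URI and the link's parent directory via Path.makeQualified:

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class QualifyTargetSketch {
      public static void main(String[] args) {
        // A link at hdfs://nn:8020/user/alice/linkToDir whose stored target
        // is the unqualified path "dir1".
        URI fsUri   = URI.create("hdfs://nn:8020");
        Path link   = new Path("hdfs://nn:8020/user/alice/linkToDir");
        Path target = new Path("dir1");

        // Same qualification qualifySymlinkTarget performs: resolve the bare
        // target against the link's parent, then fill in scheme and authority.
        Path qualified = target.makeQualified(fsUri, link.getParent());
        System.out.println(qualified); // hdfs://nn:8020/user/alice/dir1
      }
    }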
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index f2a7676e15..81c79dbded 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -68,13 +68,14 @@ public Path(Path parent, Path child) {
     // Add a slash to parent's path so resolution is compatible with URI's
     URI parentUri = parent.uri;
     String parentPath = parentUri.getPath();
-    if (!(parentPath.equals("/") || parentPath.equals("")))
+    if (!(parentPath.equals("/") || parentPath.equals(""))) {
       try {
         parentUri = new URI(parentUri.getScheme(), parentUri.getAuthority(),
                       parentUri.getPath()+"/", null, parentUri.getFragment());
       } catch (URISyntaxException e) {
         throw new IllegalArgumentException(e);
       }
+    }
     URI resolved = parentUri.resolve(child.uri);
     initialize(resolved.getScheme(), resolved.getAuthority(),
                resolved.getPath(), resolved.getFragment());
@@ -213,7 +214,8 @@ public boolean isUriPathAbsolute() {
    * There is some ambiguity here. An absolute path is a slash
    * relative name without a scheme or an authority.
    * So either this method was incorrectly named or its
-   * implementation is incorrect.
+   * implementation is incorrect. This method returns true
+   * even if there is a scheme and authority.
    */
   public boolean isAbsolute() {
      return isUriPathAbsolute();
@@ -307,19 +309,16 @@ public int depth() {
     return depth;
   }
 
-  
   /**
    *  Returns a qualified path object.
    *  
    *  Deprecated - use {@link #makeQualified(URI, Path)}
    */
- 
   @Deprecated
   public Path makeQualified(FileSystem fs) {
     return makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
   
-  
   /** Returns a qualified path object. */
   @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
   public Path makeQualified(URI defaultUri, Path workingDir ) {
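The brace fix in the Path(Path, Path) constructor above does not change behaviour; the trailing slash it guards is what makes URI resolution treat the parent as a directory. A small standalone sketch using java.net.URI directly (the paths are made up) showing why the slash matters:

    import java.net.URI;

    public class ResolveSketch {
      public static void main(String[] args) {
        // Without a trailing slash the last segment of the base is replaced.
        URI noSlash = URI.create("hdfs://nn/user/alice");
        System.out.println(noSlash.resolve("data"));   // hdfs://nn/user/data

        // With the slash the child resolves underneath the parent, which is
        // the behaviour Path(Path parent, Path child) relies on.
        URI withSlash = URI.create("hdfs://nn/user/alice/");
        System.out.println(withSlash.resolve("data")); // hdfs://nn/user/alice/data
      }
    }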
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 46993e16aa..a785cacc34 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -149,7 +149,18 @@ public static void main(String[] args) throws Throwable {
     File tmpDir = new File(new Configuration().get("hadoop.tmp.dir"));
     ensureDirectory(tmpDir);
 
-    final File workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
+    final File workDir;
+    try { 
+      workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
+    } catch (IOException ioe) {
+      // If user has insufficient perms to write to tmpDir, default  
+      // "Permission denied" message doesn't specify a filename. 
+      System.err.println("Error creating temp dir in hadoop.tmp.dir "
+                         + tmpDir + " due to " + ioe.getMessage());
+      System.exit(-1);
+      return;
+    }
+
     if (!workDir.delete()) {
       System.err.println("Delete failed for " + workDir);
       System.exit(-1);
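For context on the RunJar change above: File.createTempFile reports only the underlying OS message, so without the added wrapper an operator sees "Permission denied" with no hint that hadoop.tmp.dir is the setting to fix. A standalone sketch of the failure mode (the directory path is hypothetical):

    import java.io.File;
    import java.io.IOException;

    public class TempDirDiag {
      public static void main(String[] args) {
        File tmpDir = new File("/unwritable/hadoop-tmp"); // made-up, unwritable
        try {
          File.createTempFile("hadoop-unjar", "", tmpDir);
        } catch (IOException ioe) {
          // Same shape as the message added in RunJar: name the directory so
          // the operator knows which configuration value to correct.
          System.err.println("Error creating temp dir in hadoop.tmp.dir "
              + tmpDir + " due to " + ioe.getMessage());
        }
      }
    }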
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java
index fd3283d468..da9b895b57 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextSymlinkBaseTest.java
@@ -28,8 +28,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import static org.junit.Assert.*;
 
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -238,6 +239,31 @@ public void testStatLinkToFile() throws IOException {
     assertFalse(isDir(fc, linkToFile));
     assertEquals(file.toUri().getPath(), 
                  fc.getLinkTarget(linkToFile).toString());
+    // The local file system does not fully resolve the link
+    // when obtaining the file status
+    if (!"file".equals(getScheme())) {
+      assertEquals(fc.getFileStatus(file), fc.getFileStatus(linkToFile));
+      assertEquals(fc.makeQualified(file),
+                   fc.getFileStatus(linkToFile).getPath());
+      assertEquals(fc.makeQualified(linkToFile),
+                   fc.getFileLinkStatus(linkToFile).getPath());
+    }
+  }
+
+  @Test
+  /** Stat a relative link to a file */
+  public void testStatRelLinkToFile() throws IOException {
+    assumeTrue(!"file".equals(getScheme()));
+    Path baseDir    = new Path(testBaseDir1());
+    Path file       = new Path(testBaseDir1(), "file");
+    Path linkToFile = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    fc.createSymlink(new Path("file"), linkToFile, false);
+    assertEquals(fc.getFileStatus(file), fc.getFileStatus(linkToFile));
+    assertEquals(fc.makeQualified(file),
+                 fc.getFileStatus(linkToFile).getPath());
+    assertEquals(fc.makeQualified(linkToFile),
+                 fc.getFileLinkStatus(linkToFile).getPath());
   }
 
   @Test
@@ -474,18 +500,15 @@ public void testCreateLinkUsingFullyQualPaths() throws IOException {
    * creating using a partially qualified path is file system specific.
    */
   public void testCreateLinkUsingPartQualPath1() throws IOException {
+    // Partially qualified paths are covered for local file systems
+    // in the previous test.
+    assumeTrue(!"file".equals(getScheme()));
     Path schemeAuth   = new Path(testURI().toString());
     Path fileWoHost   = new Path(getScheme()+"://"+testBaseDir1()+"/file");
     Path link         = new Path(testBaseDir1()+"/linkToFile");
     Path linkQual     = new Path(schemeAuth, testBaseDir1()+"/linkToFile");
-    
-    // Partially qualified paths are covered for local file systems
-    // in the previous test.
-    if ("file".equals(getScheme())) {
-      return;
-    }
     FileContext localFc = FileContext.getLocalFSFileContext();
-    
+
     fc.createSymlink(fileWoHost, link, false);
     // Partially qualified path is stored
     assertEquals(fileWoHost, fc.getLinkTarget(linkQual));    
@@ -748,7 +771,7 @@ public void testCreateLinkToDotDot() throws IOException {
   }
 
   @Test
-  /** Test create symlink to ../foo */
+  /** Test create symlink to ../file */
   public void testCreateLinkToDotDotPrefix() throws IOException {
     Path file = new Path(testBaseDir1(), "file");
     Path dir  = new Path(testBaseDir1(), "test");
@@ -1205,24 +1228,30 @@ public void testRenameFileWithDestParentSymlink() throws IOException {
   }
   
   @Test
-  /** Operate on a file using a path with an intermediate symlink */  
-  public void testAccessFileViaSymlink() throws IOException {
+  /**
+   * Create, write, read, append, rename, get the block locations,
+   * checksums, and delete a file using a path with a symlink as an
+   * intermediate path component where the link target was specified
+   * using an absolute path. Rename is covered in more depth below.
+   */
+  public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
     Path baseDir        = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
     Path fileNew        = new Path(baseDir, "fileNew");
     Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
     Path fileViaLink    = new Path(linkToDir, "file");
     Path fileNewViaLink = new Path(linkToDir, "fileNew");
     fc.createSymlink(baseDir, linkToDir, false);
-    // Create, write, read, append, rename, get block locations and 
-    // checksums, and delete a file using a path that contains a 
-    // symlink as an intermediate path component. Rename is covered 
-    // in more depth below.
     createAndWriteFile(fileViaLink);
     assertTrue(exists(fc, fileViaLink));
     assertTrue(isFile(fc, fileViaLink));
     assertFalse(isDir(fc, fileViaLink));
     assertFalse(fc.getFileLinkStatus(fileViaLink).isSymlink());
     assertFalse(isDir(fc, fileViaLink));
+    assertEquals(fc.getFileStatus(file),
+                 fc.getFileLinkStatus(file));
+    assertEquals(fc.getFileStatus(fileViaLink),
+                 fc.getFileLinkStatus(fileViaLink));
     readFile(fileViaLink);
     appendToFile(fileViaLink);
     fc.rename(fileViaLink, fileNewViaLink);
@@ -1237,6 +1266,58 @@ public void testAccessFileViaSymlink() throws IOException {
     assertFalse(exists(fc, fileNewViaLink));
   }
 
+  @Test
+  /**
+   * Operate on a file using a path with an intermediate symlink where
+   * the link target was specified as a fully qualified path.
+   */
+  public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
+    Path baseDir        = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
+    Path fileNew        = new Path(baseDir, "fileNew");
+    Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink    = new Path(linkToDir, "file");
+    Path fileNewViaLink = new Path(linkToDir, "fileNew");
+    fc.createSymlink(fc.makeQualified(baseDir), linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    assertEquals(fc.getFileStatus(file),
+                 fc.getFileLinkStatus(file));
+    assertEquals(fc.getFileStatus(fileViaLink),
+                 fc.getFileLinkStatus(fileViaLink));
+    readFile(fileViaLink);
+  }
+
+  @Test
+  /**
+   * Operate on a file using a path with an intermediate symlink where
+   * the link target was specified as a relative path.
+   */
+  public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
+    assumeTrue(!"file".equals(getScheme()));
+    Path baseDir     = new Path(testBaseDir1());
+    Path dir         = new Path(testBaseDir1(), "dir");
+    Path file        = new Path(dir, "file");
+    Path linkToDir   = new Path(testBaseDir1(), "linkToDir");
+    Path fileViaLink = new Path(linkToDir, "file");
+
+    fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
+    fc.createSymlink(new Path("dir"), linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    // Note that getFileStatus returns fully qualified paths even
+    // when called on an absolute path.
+    assertEquals(fc.makeQualified(file),
+                 fc.getFileStatus(file).getPath());
+    // In each case getFileLinkStatus returns the same FileStatus
+    // as getFileStatus since we're not calling it on a link and
+    // FileStatus objects are compared by Path.
+    assertEquals(fc.getFileStatus(file),
+                 fc.getFileLinkStatus(file));
+    assertEquals(fc.getFileStatus(fileViaLink),
+                 fc.getFileLinkStatus(fileViaLink));
+    assertEquals(fc.getFileStatus(fileViaLink),
+                 fc.getFileLinkStatus(file));
+  }
+
   @Test
   /** Test create, list, and delete a directory through a symlink */
   public void testAccessDirViaSymlink() throws IOException {
@@ -1272,4 +1353,4 @@ public void testSetTimes() throws IOException {
       assertEquals(2, fc.getFileStatus(file).getModificationTime());
     }
   }
-}
\ No newline at end of file
+}
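The intermediate-symlink tests added above reduce to the following FileContext pattern. A minimal sketch against an already-initialised FileContext fc (the paths are illustrative and error handling is omitted):

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class SymlinkSketch {
      // Create a file, then reach it through a symlinked directory.
      static void readThroughLink(FileContext fc, Path baseDir) throws Exception {
        Path dir         = new Path(baseDir, "dir");
        Path file        = new Path(dir, "file");
        Path linkToDir   = new Path(baseDir, "linkToDir");
        Path fileViaLink = new Path(linkToDir, "file");

        fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
        fc.createSymlink(new Path("dir"), linkToDir, false); // relative target
        fc.create(file, EnumSet.of(CreateFlag.CREATE)).close();

        // getFileStatus follows the intermediate link; getFileLinkStatus on
        // the link itself reports the link rather than its target.
        System.out.println(fc.getFileStatus(fileViaLink).getPath());
        System.out.println(fc.getFileLinkStatus(linkToDir).isSymlink()); // true
      }
    }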
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
index d36a39edb8..b1d92686c2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
@@ -95,6 +95,7 @@ public void testParent() {
     assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
     assertEquals(new Path("foo"), new Path("foo/bar").getParent());
     assertEquals(new Path("/"), new Path("/foo").getParent());
+    assertEquals(null, new Path("/").getParent());
     if (Path.WINDOWS) {
       assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
     }
@@ -159,6 +160,13 @@ public void testDots() {
     assertEquals(new Path("foo/bar/baz","../../..").toString(), "");
     assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
   }
+
+  /** Test Path objects created from other Path objects */
+  public void testChildParentResolution() throws URISyntaxException, IOException {
+    Path parent = new Path("foo1://bar1/baz1");
+    Path child  = new Path("foo2://bar2/baz2");
+    assertEquals(child, new Path(parent, child));
+  }
   
   public void testScheme() throws java.io.IOException {
     assertEquals("foo:/bar", new Path("foo:/","/bar").toString()); 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3ee413ea7f..9fc70f96c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -5,6 +5,12 @@ Trunk (unreleased changes)
     HDFS-395.  DFS Scalability: Incremental block reports. (Tomasz Nykiel
                via hairong)
 
+    HDFS-2517. Add protobuf service for JournalProtocol. (suresh)
+
+    HDFS-2518. Add protobuf service for NamenodeProtocol. (suresh)
+
+    HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple 
@@ -52,6 +58,13 @@ Trunk (unreleased changes)
 
     HDFS-2334. Add Closeable to JournalManager. (Ivan Kelly via jitendra)
 
+    HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
+    (Hari Mankude via eli)
+
+    HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+
+    HDFS-2410. Further cleanup of hardcoded configuration keys and values.
+    (suresh)
 
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the
@@ -100,6 +113,9 @@ Trunk (unreleased changes)
     HDFS-2526. (Client)NamenodeProtocolTranslatorR23 do not need to keep a
                reference to rpcProxyWithoutRetry (atm)
 
+    HDFS-2532. TestDfsOverAvroRpc timing out in trunk (Uma Maheswara Rao G
+               via todd)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -117,6 +133,24 @@ Release 0.23.1 - UNRELEASED
     HDFS-2562. Refactor DN configuration variables out of DataNode class
     (todd)
 
+    HDFS-2563. Some cleanup in BPOfferService. (todd)
+
+    HDFS-208. name node should warn if only one dir is listed in dfs.name.dir.
+    (Uma Maheswara Rao G via eli)
+
+    HDFS-2568. Use a set to manage child sockets in XceiverServer.
+    (harsh via eli)
+
+    HDFS-2454. Move maxXceiverCount check to before starting the
+    thread in dataXceiver. (harsh via eli)
+
+    HDFS-2570. Add descriptions for dfs.*.https.address in hdfs-default.xml.
+    (eli)
+
+    HDFS-2536. Remove unused imports. (harsh via eli)
+
+    HDFS-2566. Move BPOfferService to be a non-inner class. (todd)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -127,8 +161,23 @@ Release 0.23.1 - UNRELEASED
     HDFS-2129. Simplify BlockReader to not inherit from FSInputChecker.
     (todd)
 
+    HDFS-2246. Enable reading a block directly from local file system
+    for a client on the same node as the block file.  (Andrew Purtell,
+    Suresh Srinivas and Jitendra Nath Pandey via szetszwo)
+
   BUG FIXES
 
+    HDFS-2541. For a sufficiently large value of blocks, the DN Scanner 
+    may request a random number with a negative seed value. (harsh via eli)
+
+    HDFS-2502. hdfs-default.xml should include dfs.name.dir.restore.
+    (harsh via eli)
+
+    HDFS-2567. When 0 DNs are available, show a proper error when
+    trying to browse DFS via web UI. (harsh via eli)
+
+    HDFS-2575. DFSTestUtil may create empty files (todd)
+    
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES
@@ -1902,6 +1951,11 @@ Release 0.22.0 - Unreleased
     HDFS-2002. Incorrect computation of needed blocks in getTurnOffTip().
     (Plamen Jeliazkov via shv)
 
+    HDFS-2514. Link resolution bug for intermediate symlinks with
+    relative targets. (eli)
+
+    HDFS-2573. TestFiDataXceiverServer is failing, not testing OOME (cos)
+
 Release 0.21.1 - Unreleased
 
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
new file mode 100644
index 0000000000..d34d74d438
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -0,0 +1,380 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * BlockReaderLocal enables local short circuited reads. If the DFS client is on
+ * the same machine as the datanode, then the client can read files directly
+ * from the local file system rather than going through the datanode for better
+ * performance. 
+ * {@link BlockReaderLocal} works as follows: + * + */ +class BlockReaderLocal extends RemoteBlockReader2 { + public static final Log LOG = LogFactory.getLog(DFSClient.class); + + //Stores the cache and proxy for a local datanode. + private static class LocalDatanodeInfo { + private ClientDatanodeProtocol proxy = null; + private final Map cache; + + LocalDatanodeInfo() { + final int cacheSize = 10000; + final float hashTableLoadFactor = 0.75f; + int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) + 1; + cache = Collections + .synchronizedMap(new LinkedHashMap( + hashTableCapacity, hashTableLoadFactor, true) { + private static final long serialVersionUID = 1; + + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { + return size() > cacheSize; + } + }); + } + + private synchronized ClientDatanodeProtocol getDatanodeProxy( + DatanodeInfo node, Configuration conf, int socketTimeout) + throws IOException { + if (proxy == null) { + proxy = DFSUtil.createClientDatanodeProtocolProxy(node, conf, + socketTimeout); + } + return proxy; + } + + private synchronized void resetDatanodeProxy() { + if (null != proxy) { + RPC.stopProxy(proxy); + proxy = null; + } + } + + private BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) { + return cache.get(b); + } + + private void setBlockLocalPathInfo(ExtendedBlock b, BlockLocalPathInfo info) { + cache.put(b, info); + } + + private void removeBlockLocalPathInfo(ExtendedBlock b) { + cache.remove(b); + } + } + + // Multiple datanodes could be running on the local machine. Store proxies in + // a map keyed by the ipc port of the datanode. + private static Map localDatanodeInfoMap = new HashMap(); + + private final FileInputStream dataIn; // reader for the data file + + private FileInputStream checksumIn; // reader for the checksum file + + private int offsetFromChunkBoundary; + + ByteBuffer dataBuff = null; + ByteBuffer checksumBuff = null; + + /** + * The only way this object can be instantiated. + */ + static BlockReaderLocal newBlockReader(Configuration conf, String file, + ExtendedBlock blk, Token token, DatanodeInfo node, + int socketTimeout, long startOffset, long length) throws IOException { + + LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node + .getIpcPort()); + // check the cache first + BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk); + if (pathinfo == null) { + pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token); + } + + // check to see if the file exists. It may so happen that the + // HDFS file has been deleted and this block-lookup is occurring + // on behalf of a new HDFS file. This time, the block file could + // be residing in a different portion of the fs.data.dir directory. + // In this case, we remove this entry from the cache. The next + // call to this method will re-populate the cache. 
+ FileInputStream dataIn = null; + FileInputStream checksumIn = null; + BlockReaderLocal localBlockReader = null; + boolean skipChecksumCheck = skipChecksumCheck(conf); + try { + // get a local file system + File blkfile = new File(pathinfo.getBlockPath()); + dataIn = new FileInputStream(blkfile); + + if (LOG.isDebugEnabled()) { + LOG.debug("New BlockReaderLocal for file " + blkfile + " of size " + + blkfile.length() + " startOffset " + startOffset + " length " + + length + " short circuit checksum " + skipChecksumCheck); + } + + if (!skipChecksumCheck) { + // get the metadata file + File metafile = new File(pathinfo.getMetaPath()); + checksumIn = new FileInputStream(metafile); + + // read and handle the common header here. For now just a version + BlockMetadataHeader header = BlockMetadataHeader + .readHeader(new DataInputStream(checksumIn)); + short version = header.getVersion(); + if (version != FSDataset.METADATA_VERSION) { + LOG.warn("Wrong version (" + version + ") for metadata file for " + + blk + " ignoring ..."); + } + DataChecksum checksum = header.getChecksum(); + long firstChunkOffset = startOffset + - (startOffset % checksum.getBytesPerChecksum()); + localBlockReader = new BlockReaderLocal(conf, file, blk, token, + startOffset, length, pathinfo, checksum, true, dataIn, + firstChunkOffset, checksumIn); + } else { + localBlockReader = new BlockReaderLocal(conf, file, blk, token, + startOffset, length, pathinfo, dataIn); + } + } catch (IOException e) { + // remove from cache + localDatanodeInfo.removeBlockLocalPathInfo(blk); + DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk + + " from cache because local file " + pathinfo.getBlockPath() + + " could not be opened."); + throw e; + } finally { + if (localBlockReader == null) { + if (dataIn != null) { + dataIn.close(); + } + if (checksumIn != null) { + checksumIn.close(); + } + } + } + return localBlockReader; + } + + private static synchronized LocalDatanodeInfo getLocalDatanodeInfo(int port) { + LocalDatanodeInfo ldInfo = localDatanodeInfoMap.get(port); + if (ldInfo == null) { + ldInfo = new LocalDatanodeInfo(); + localDatanodeInfoMap.put(port, ldInfo); + } + return ldInfo; + } + + private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk, + DatanodeInfo node, Configuration conf, int timeout, + Token token) throws IOException { + LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort); + BlockLocalPathInfo pathinfo = null; + ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node, + conf, timeout); + try { + // make RPC to local datanode to find local pathnames of blocks + pathinfo = proxy.getBlockLocalPathInfo(blk, token); + if (pathinfo != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Cached location of block " + blk + " as " + pathinfo); + } + localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo); + } + } catch (IOException e) { + localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error + throw e; + } + return pathinfo; + } + + private static boolean skipChecksumCheck(Configuration conf) { + return conf.getBoolean( + DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, + DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT); + } + + private BlockReaderLocal(Configuration conf, String hdfsfile, + ExtendedBlock block, Token token, long startOffset, + long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn) + throws IOException { + this(conf, hdfsfile, block, token, startOffset, length, pathinfo, + 
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_NULL, 4), false, + dataIn, startOffset, null); + } + + private BlockReaderLocal(Configuration conf, String hdfsfile, + ExtendedBlock block, Token token, long startOffset, + long length, BlockLocalPathInfo pathinfo, DataChecksum checksum, + boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset, + FileInputStream checksumIn) throws IOException { + super(hdfsfile, block.getBlockPoolId(), block.getBlockId(), dataIn + .getChannel(), checksum, verifyChecksum, startOffset, firstChunkOffset, + length, null); + this.dataIn = dataIn; + this.checksumIn = checksumIn; + this.offsetFromChunkBoundary = (int) (startOffset-firstChunkOffset); + dataBuff = bufferPool.getBuffer(bytesPerChecksum*64); + checksumBuff = bufferPool.getBuffer(checksumSize*64); + //Initially the buffers have nothing to read. + dataBuff.flip(); + checksumBuff.flip(); + long toSkip = firstChunkOffset; + while (toSkip > 0) { + long skipped = dataIn.skip(toSkip); + if (skipped == 0) { + throw new IOException("Couldn't initialize input stream"); + } + toSkip -= skipped; + } + if (checksumIn != null) { + long checkSumOffset = (firstChunkOffset / bytesPerChecksum) + * checksumSize; + while (checkSumOffset > 0) { + long skipped = checksumIn.skip(checkSumOffset); + if (skipped == 0) { + throw new IOException("Couldn't initialize checksum input stream"); + } + checkSumOffset -= skipped; + } + } + } + + private int readIntoBuffer(FileInputStream stream, ByteBuffer buf) + throws IOException { + int bytesRead = stream.getChannel().read(buf); + if (bytesRead < 0) { + //EOF + return bytesRead; + } + while (buf.remaining() > 0) { + int n = stream.getChannel().read(buf); + if (n < 0) { + //EOF + return bytesRead; + } + bytesRead += n; + } + return bytesRead; + } + + @Override + public synchronized int read(byte[] buf, int off, int len) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.info("read off " + off + " len " + len); + } + if (!verifyChecksum) { + return dataIn.read(buf, off, len); + } else { + int dataRead = -1; + if (dataBuff.remaining() == 0) { + dataBuff.clear(); + checksumBuff.clear(); + dataRead = readIntoBuffer(dataIn, dataBuff); + readIntoBuffer(checksumIn, checksumBuff); + checksumBuff.flip(); + dataBuff.flip(); + if (verifyChecksum) { + checksum.verifyChunkedSums(dataBuff, checksumBuff, filename, + this.startOffset); + } + } else { + dataRead = dataBuff.remaining(); + } + if (dataRead > 0) { + int nRead = Math.min(dataRead - offsetFromChunkBoundary, len); + if (offsetFromChunkBoundary > 0) { + dataBuff.position(offsetFromChunkBoundary); + // Its either end of file or dataRead is greater than the + // offsetFromChunkBoundary + offsetFromChunkBoundary = 0; + } + if (nRead > 0) { + dataBuff.get(buf, off, nRead); + return nRead; + } else { + return 0; + } + } else { + return -1; + } + } + } + + @Override + public synchronized long skip(long n) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("skip " + n); + } + if (!verifyChecksum) { + return dataIn.skip(n); + } else { + return super.skip(n); + } + } + + @Override + public synchronized void close() throws IOException { + dataIn.close(); + if (checksumIn != null) { + checksumIn.close(); + } + if (dataBuff != null) { + bufferPool.returnBuffer(dataBuff); + dataBuff = null; + } + if (checksumBuff != null) { + bufferPool.returnBuffer(checksumBuff); + checksumBuff = null; + } + super.close(); + } +} \ No newline at end of file diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index c964346c74..9f44415526 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -24,13 +24,20 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.NetworkInterface; import java.net.Socket; +import java.net.SocketException; import java.net.URI; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; + import javax.net.SocketFactory; import org.apache.commons.logging.Log; @@ -77,6 +84,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; @@ -234,6 +243,8 @@ Conf getConf() { private final Map filesBeingWritten = new HashMap(); + private boolean shortCircuitLocalReads; + /** * Same as this(NameNode.getAddress(conf), conf); * @see #DFSClient(InetSocketAddress, Configuration) @@ -317,6 +328,13 @@ public DFSClient(URI nameNodeUri, Configuration conf, "Expecting exactly one of nameNodeUri and rpcNamenode being null: " + "nameNodeUri=" + nameNodeUri + ", rpcNamenode=" + rpcNamenode); } + // read directly from the block file if configured. + this.shortCircuitLocalReads = conf.getBoolean( + DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, + DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT); + if (LOG.isDebugEnabled()) { + LOG.debug("Short circuit read is " + shortCircuitLocalReads); + } } private Class getFailoverProxyProviderClass(String authority, Configuration conf) @@ -539,6 +557,82 @@ public long renewDelegationToken(Token token) } } + /** + * Get {@link BlockReader} for short circuited local reads. 
+ */ + static BlockReader getLocalBlockReader(Configuration conf, + String src, ExtendedBlock blk, Token accessToken, + DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock) + throws InvalidToken, IOException { + try { + return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken, + chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes() + - offsetIntoBlock); + } catch (RemoteException re) { + throw re.unwrapRemoteException(InvalidToken.class, + AccessControlException.class); + } + } + + private static Set localIpAddresses = Collections + .synchronizedSet(new HashSet()); + + private static boolean isLocalAddress(InetSocketAddress targetAddr) { + InetAddress addr = targetAddr.getAddress(); + if (localIpAddresses.contains(addr.getHostAddress())) { + if (LOG.isTraceEnabled()) { + LOG.trace("Address " + targetAddr + " is local"); + } + return true; + } + + // Check if the address is any local or loop back + boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress(); + + // Check if the address is defined on any interface + if (!local) { + try { + local = NetworkInterface.getByInetAddress(addr) != null; + } catch (SocketException e) { + local = false; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("Address " + targetAddr + " is local"); + } + if (local == true) { + localIpAddresses.add(addr.getHostAddress()); + } + return local; + } + + /** + * Should the block access token be refetched on an exception + * + * @param ex Exception received + * @param targetAddr Target datanode address from where exception was received + * @return true if block access token has expired or invalid and it should be + * refetched + */ + private static boolean tokenRefetchNeeded(IOException ex, + InetSocketAddress targetAddr) { + /* + * Get a new access token and retry. Retry is needed in 2 cases. 1) When + * both NN and DN re-started while DFSClient holding a cached access token. + * 2) In the case that NN fails to update its access key at pre-set interval + * (by a wide margin) and subsequently restarts. In this case, DN + * re-registers itself with NN and receives a new access key, but DN will + * delete the old access key from its memory since it's considered expired + * based on the estimated expiration date. + */ + if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) { + LOG.info("Access token was invalid when connecting to " + targetAddr + + " : " + ex); + return true; + } + return false; + } + /** * Cancel a delegation token * @param token the token to cancel @@ -1630,7 +1724,7 @@ public ExtendedBlock getCurrentBlock() { synchronized List getAllBlocks() throws IOException { return ((DFSInputStream)in).getAllBlocks(); } - + /** * @return The visible length of the file. 
*/ @@ -1638,6 +1732,14 @@ public long getVisibleLength() throws IOException { return ((DFSInputStream)in).getFileLength(); } } + + boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr) + throws IOException { + if (shortCircuitLocalReads && isLocalAddress(targetAddr)) { + return true; + } + return false; + } void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) { DatanodeInfo [] dnArr = { dn }; @@ -1660,4 +1762,8 @@ public String toString() { return getClass().getSimpleName() + "[clientName=" + clientName + ", ugi=" + ugi + "]"; } + + void disableShortCircuit() { + shortCircuitLocalReads = false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 27a29775bd..9f774d00aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -262,6 +262,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max"; public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500; + public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit"; + public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false; + public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; + public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false; + // property for fsimage compression public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress"; public static final boolean DFS_IMAGE_COMPRESS_DEFAULT = false; @@ -302,6 +307,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY = "dfs.web.authentication.kerberos.principal"; public static final String DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab"; + public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user"; + // HA related configuration public static final String DFS_HA_NAMENODES_KEY = "dfs.ha.namenodes"; public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 71ec00e20e..2b817ffec0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; /**************************************************************** @@ -405,11 +406,8 @@ private synchronized DatanodeInfo blockSeekTo(long target) throws IOException { try { ExtendedBlock blk = targetBlock.getBlock(); Token accessToken = targetBlock.getBlockToken(); - - blockReader = getBlockReader( - targetAddr, src, blk, - accessToken, - offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock, + blockReader = 
getBlockReader(targetAddr, chosenNode, src, blk, + accessToken, offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock, buffersize, verifyChecksum, dfsClient.clientName); if(connectFailedOnce) { DFSClient.LOG.info("Successfully connected to " + targetAddr + @@ -543,7 +541,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException { if (pos > blockEnd) { currentNode = blockSeekTo(pos); } - int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L)); + int realLen = (int) Math.min(len, (blockEnd - pos + 1L)); int result = readBuffer(buf, off, realLen, corruptedBlockMap); if (result >= 0) { @@ -666,12 +664,9 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end, Token blockToken = block.getBlockToken(); int len = (int) (end - start + 1); - - reader = getBlockReader(targetAddr, src, - block.getBlock(), - blockToken, - start, len, buffersize, - verifyChecksum, dfsClient.clientName); + reader = getBlockReader(targetAddr, chosenNode, src, block.getBlock(), + blockToken, start, len, buffersize, verifyChecksum, + dfsClient.clientName); int nread = reader.readAll(buf, offset, len); if (nread != len) { throw new IOException("truncated return from reader.read(): " + @@ -684,6 +679,10 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end, e.getPos() + " from " + chosenNode.getName()); // we want to remember what we have tried addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap); + } catch (AccessControlException ex) { + DFSClient.LOG.warn("Short circuit access failed ", ex); + dfsClient.disableShortCircuit(); + continue; } catch (IOException e) { if (e instanceof InvalidBlockTokenException && refetchToken > 0) { DFSClient.LOG.info("Will get a new access token and retry, " @@ -726,6 +725,7 @@ private void closeBlockReader(BlockReader reader) throws IOException { * Otherwise, it will create a new connection. 
* * @param dnAddr Address of the datanode + * @param chosenNode Chosen datanode information * @param file File location * @param block The Block object * @param blockToken The access token for security @@ -737,6 +737,7 @@ private void closeBlockReader(BlockReader reader) throws IOException { * @return New BlockReader instance */ protected BlockReader getBlockReader(InetSocketAddress dnAddr, + DatanodeInfo chosenNode, String file, ExtendedBlock block, Token blockToken, @@ -746,6 +747,12 @@ protected BlockReader getBlockReader(InetSocketAddress dnAddr, boolean verifyChecksum, String clientName) throws IOException { + + if (dfsClient.shouldTryShortCircuitRead(dnAddr)) { + return DFSClient.getLocalBlockReader(dfsClient.conf, src, block, + blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset); + } + IOException err = null; boolean fromCache = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 364fe0b3f3..f35f491039 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.UserGroupInformation; @@ -723,6 +724,14 @@ public static boolean isFederationEnabled(Configuration conf) { return collection != null && collection.size() != 0; } + /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */ + static ClientDatanodeProtocol createClientDatanodeProtocolProxy( + DatanodeID datanodeid, Configuration conf, int socketTimeout) + throws IOException { + return new org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeProtocolTranslatorR23( + datanodeid, conf, socketTimeout); + } + /** Create a {@link ClientDatanodeProtocol} proxy */ public static ClientDatanodeProtocol createClientDatanodeProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 44b35b4022..b7da8d4d8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -24,7 +24,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; -import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.Socket; import java.nio.ByteBuffer; @@ -37,12 +36,9 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 2b2f77ecb4..1f5f12bda7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -85,7 +85,7 @@ public class RemoteBlockReader2 implements BlockReader { Socket dnSock; //for now just sending the status code (e.g. checksumOk) after the read. private ReadableByteChannel in; - private DataChecksum checksum; + protected DataChecksum checksum; private PacketHeader curHeader; private ByteBuffer curPacketBuf = null; @@ -96,25 +96,25 @@ public class RemoteBlockReader2 implements BlockReader { private long lastSeqNo = -1; /** offset in block where reader wants to actually read */ - private long startOffset; - private final String filename; + protected long startOffset; + protected final String filename; - private static DirectBufferPool bufferPool = + protected static DirectBufferPool bufferPool = new DirectBufferPool(); private ByteBuffer headerBuf = ByteBuffer.allocate( PacketHeader.PKT_HEADER_LEN); - private int bytesPerChecksum; - private int checksumSize; + protected int bytesPerChecksum; + protected int checksumSize; /** * The total number of bytes we need to transfer from the DN. * This is the amount that the user has requested plus some padding * at the beginning so that the read can begin on a chunk boundary. */ - private long bytesNeededToFinish; + protected long bytesNeededToFinish; - private final boolean verifyChecksum; + protected final boolean verifyChecksum; private boolean sentStatusCode = false; @@ -271,7 +271,7 @@ private void readTrailingEmptyPacket() throws IOException { } } - private RemoteBlockReader2(String file, String bpid, long blockId, + protected RemoteBlockReader2(String file, String bpid, long blockId, ReadableByteChannel in, DataChecksum checksum, boolean verifyChecksum, long startOffset, long firstChunkOffset, long bytesToRead, Socket dnSock) { // Path is used only for printing block and file information in debug diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java new file mode 100644 index 0000000000..a1823b3aad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableFactories; +import org.apache.hadoop.io.WritableFactory; + +/** + * A block and the full path information to the block data file and + * the metadata file stored on the local file system. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BlockLocalPathInfo implements Writable { + static final WritableFactory FACTORY = new WritableFactory() { + public Writable newInstance() { return new BlockLocalPathInfo(); } + }; + static { // register a ctor + WritableFactories.setFactory(BlockLocalPathInfo.class, FACTORY); + } + + private ExtendedBlock block; + private String localBlockPath = ""; // local file storing the data + private String localMetaPath = ""; // local file storing the checksum + + public BlockLocalPathInfo() {} + + /** + * Constructs BlockLocalPathInfo. + * @param b The block corresponding to this lock path info. + * @param file Block data file. + * @param metafile Metadata file for the block. + */ + public BlockLocalPathInfo(ExtendedBlock b, String file, String metafile) { + block = b; + localBlockPath = file; + localMetaPath = metafile; + } + + /** + * Get the Block data file. + * @return Block data file. + */ + public String getBlockPath() {return localBlockPath;} + + /** + * Get the Block metadata file. + * @return Block metadata file. + */ + public String getMetaPath() {return localMetaPath;} + + @Override + public void write(DataOutput out) throws IOException { + block.write(out); + Text.writeString(out, localBlockPath); + Text.writeString(out, localMetaPath); + } + + @Override + public void readFields(DataInput in) throws IOException { + block = new ExtendedBlock(); + block.readFields(in); + localBlockPath = Text.readString(in); + localMetaPath = Text.readString(in); + } + + /** + * Get number of bytes in the block. + * @return Number of bytes in the block. 
+ */ + public long getNumBytes() { + return block.getNumBytes(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java index a4b4933b8f..c0efa52ec5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java @@ -19,14 +19,14 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector; import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.security.KerberosInfo; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; /** An client-datanode protocol for block recovery @@ -84,5 +84,30 @@ public interface ClientDatanodeProtocol extends VersionedProtocol { * deleted along with its contents. * @throws IOException */ - void deleteBlockPool(String bpid, boolean force) throws IOException; + void deleteBlockPool(String bpid, boolean force) throws IOException; + + /** + * Retrieves the path names of the block file and metadata file stored on the + * local file system. + * + * In order for this method to work, one of the following should be satisfied: + *
+ * <ul>
+ * <li>
+ * The client user must be configured at the datanode to be able to use this
+ * method.</li>
+ * <li>
+ * When security is enabled, kerberos authentication must be used to connect
+ * to the datanode.</li>
+ * </ul>
+ * + * @param block + * the specified block on the local datanode + * @param token + * the block access token. + * @return the BlockLocalPathInfo of a block + * @throws IOException + * on error + */ + BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, + Token<BlockTokenIdentifier> token) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 1b7d98528b..04c9be8473 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -852,9 +852,9 @@ public void setTimes(String src, long mtime, long atime) /** * Create symlink to a file or directory. - * @param target The pathname of the destination that the + * @param target The path of the destination that the * link points to. - * @param link The pathname of the link being created. + * @param link The path of the link being created. * @param dirPerm permissions to use when creating parent directories * @param createParent - if true then missing parent dirs are created * if false then parent must exist @@ -875,14 +875,16 @@ public void createSymlink(String target, String link, FsPermission dirPerm, IOException; /** - * Resolve the first symbolic link on the specified path. - * @param path The pathname that needs to be resolved - * - * @return The pathname after resolving the first symbolic link if any. - * + * Return the target of the given symlink. If there is an intermediate + * symlink in the path (ie a symlink leading up to the final path component) + * then the given path is returned with this symlink resolved. + * + * @param path The path with a link that needs resolution. + * @return The path after resolving the first symbolic link in the path. * @throws AccessControlException permission denied * @throws FileNotFoundException If path does not exist - * @throws IOException If an I/O error occurred + * @throws IOException If the given path does not refer to a symlink + * or an I/O error occurred */ @Idempotent public String getLinkTarget(String path) throws AccessControlException, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java index 9726ab70a4..03fb704934 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java @@ -32,10 +32,10 @@ @InterfaceStability.Evolving public final class UnresolvedPathException extends UnresolvedLinkException { private static final long serialVersionUID = 1L; - private String originalPath; // The original path containing the link - private String linkTarget; // The target of the link - private String remainingPath; // The path part following the link - + private String path; // The path containing the link + private String preceding; // The path part preceding the link + private String remainder; // The path part following the link + private String linkTarget; // The link's target /** * Used by RemoteException to instantiate an UnresolvedPathException.
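The ClientDatanodeProtocol.getBlockLocalPathInfo addition above is the RPC that lets a client co-located with a datanode read a replica directly off the local file system instead of streaming it through the datanode. The following caller-side sketch is illustrative only and is not part of this patch; it assumes a ClientDatanodeProtocol proxy, an ExtendedBlock, and a valid block access token have already been obtained elsewhere, and the class and method names are invented for the example.

// Hypothetical usage sketch (not part of this patch).
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class LocalBlockReadSketch {
  /** Opens the block data file that the local datanode stores for this block. */
  static FileInputStream openLocalBlock(ClientDatanodeProtocol datanode,
      ExtendedBlock block, Token<BlockTokenIdentifier> token)
      throws IOException {
    // Ask the datanode for the local paths of the block data and metadata files.
    BlockLocalPathInfo info = datanode.getBlockLocalPathInfo(block, token);
    // info.getMetaPath() points at the checksum file and could be opened the
    // same way if the caller wants to verify checksums itself.
    return new FileInputStream(new File(info.getBlockPath()));
  }
}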
@@ -44,22 +44,30 @@ public UnresolvedPathException(String msg) { super(msg); } - public UnresolvedPathException(String originalPath, String remainingPath, - String linkTarget) { - this.originalPath = originalPath; - this.remainingPath = remainingPath; - this.linkTarget = linkTarget; + public UnresolvedPathException(String path, String preceding, + String remainder, String linkTarget) { + this.path = path; + this.preceding = preceding; + this.remainder = remainder; + this.linkTarget = linkTarget; } - public Path getUnresolvedPath() throws IOException { - return new Path(originalPath); - } - + /** + * Return a path with the link resolved to its target. + */ public Path getResolvedPath() throws IOException { - if (remainingPath == null || "".equals(remainingPath)) { - return new Path(linkTarget); + // If the link target is absolute we can throw out the preceding part and + // just append the remainder to the target, otherwise append each + // piece to resolve the link in the path. + boolean noRemainder = (remainder == null || "".equals(remainder)); + Path target = new Path(linkTarget); + if (target.isUriPathAbsolute()) { + return noRemainder ? target : new Path(target, remainder); + } else { + return noRemainder + ? new Path(preceding, target) + : new Path(new Path(preceding, linkTarget), remainder); } - return new Path(linkTarget, remainingPath); } @Override @@ -68,7 +76,7 @@ public String getMessage() { if (msg != null) { return msg; } - String myMsg = "Unresolved path " + originalPath; + String myMsg = "Unresolved path " + path; try { return getResolvedPath().toString(); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java index 829495db81..769794c861 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/HdfsProtos.java @@ -1,20 +1,3 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ // Generated by the protocol buffer compiler. DO NOT EDIT!
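To make the new UnresolvedPathException.getResolvedPath() behaviour concrete, here is a small illustrative sketch using the four-argument constructor introduced above. The path strings are invented for the example and are not taken from the patch: an absolute link target replaces everything up to the link, while a relative target is resolved against the part of the path that precedes the link.

// Illustrative sketch only; the example paths are hypothetical.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;

public class ResolvedPathSketch {
  public static void main(String[] args) throws Exception {
    // Resolving /a/b/c where /a/b is a symlink to the absolute target /x/y:
    // the preceding part (/a) is dropped and the remainder (c) is appended.
    UnresolvedPathException absolute =
        new UnresolvedPathException("/a/b/c", "/a", "c", "/x/y");
    Path resolvedAbsolute = absolute.getResolvedPath();
    System.out.println(resolvedAbsolute); // prints /x/y/c

    // Resolving /a/b/c where /a/b is a symlink to the relative target y:
    // the target is resolved against the preceding part before appending c.
    UnresolvedPathException relative =
        new UnresolvedPathException("/a/b/c", "/a", "c", "y");
    Path resolvedRelative = relative.getResolvedPath();
    System.out.println(resolvedRelative); // prints /a/y/c
  }
}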
// source: hdfs.proto @@ -25,6 +8,84 @@ private HdfsProtos() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } + public enum ReplicaState + implements com.google.protobuf.ProtocolMessageEnum { + FINALIZED(0, 0), + RBW(1, 1), + RWR(2, 2), + RUR(3, 3), + TEMPORARY(4, 4), + ; + + public static final int FINALIZED_VALUE = 0; + public static final int RBW_VALUE = 1; + public static final int RWR_VALUE = 2; + public static final int RUR_VALUE = 3; + public static final int TEMPORARY_VALUE = 4; + + + public final int getNumber() { return value; } + + public static ReplicaState valueOf(int value) { + switch (value) { + case 0: return FINALIZED; + case 1: return RBW; + case 2: return RWR; + case 3: return RUR; + case 4: return TEMPORARY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ReplicaState findValueByNumber(int number) { + return ReplicaState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final ReplicaState[] VALUES = { + FINALIZED, RBW, RWR, RUR, TEMPORARY, + }; + + public static ReplicaState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ReplicaState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ReplicaState) + } + public interface ExtendedBlockProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -2011,6 +2072,599 @@ public Builder clearIpcPort() { // @@protoc_insertion_point(class_scope:DatanodeIDProto) } + public interface DatanodeIDsProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .DatanodeIDProto datanodes = 1; + java.util.List + getDatanodesList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index); + int getDatanodesCount(); + java.util.List + getDatanodesOrBuilderList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder( + int index); + } + public static final class DatanodeIDsProto extends + com.google.protobuf.GeneratedMessage + implements DatanodeIDsProtoOrBuilder { + // Use DatanodeIDsProto.newBuilder() to construct. 
+ private DatanodeIDsProto(Builder builder) { + super(builder); + } + private DatanodeIDsProto(boolean noInit) {} + + private static final DatanodeIDsProto defaultInstance; + public static DatanodeIDsProto getDefaultInstance() { + return defaultInstance; + } + + public DatanodeIDsProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_fieldAccessorTable; + } + + // repeated .DatanodeIDProto datanodes = 1; + public static final int DATANODES_FIELD_NUMBER = 1; + private java.util.List datanodes_; + public java.util.List getDatanodesList() { + return datanodes_; + } + public java.util.List + getDatanodesOrBuilderList() { + return datanodes_; + } + public int getDatanodesCount() { + return datanodes_.size(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index) { + return datanodes_.get(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder( + int index) { + return datanodes_.get(index); + } + + private void initFields() { + datanodes_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getDatanodesCount(); i++) { + if (!getDatanodes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < datanodes_.size(); i++) { + output.writeMessage(1, datanodes_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < datanodes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, datanodes_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto) obj; + + boolean result = true; + result = result && getDatanodesList() + .equals(other.getDatanodesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getDatanodesCount() > 0) { + hash = (37 * hash) + DATANODES_FIELD_NUMBER; + hash = (53 
* hash) + getDatanodesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder 
extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DatanodeIDsProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getDatanodesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (datanodesBuilder_ == null) { + datanodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + datanodesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto(this); + int from_bitField0_ = bitField0_; + if (datanodesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + datanodes_ = java.util.Collections.unmodifiableList(datanodes_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.datanodes_ = datanodes_; + } else { + result.datanodes_ = datanodesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto other) { + if (other == 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance()) return this; + if (datanodesBuilder_ == null) { + if (!other.datanodes_.isEmpty()) { + if (datanodes_.isEmpty()) { + datanodes_ = other.datanodes_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureDatanodesIsMutable(); + datanodes_.addAll(other.datanodes_); + } + onChanged(); + } + } else { + if (!other.datanodes_.isEmpty()) { + if (datanodesBuilder_.isEmpty()) { + datanodesBuilder_.dispose(); + datanodesBuilder_ = null; + datanodes_ = other.datanodes_; + bitField0_ = (bitField0_ & ~0x00000001); + datanodesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getDatanodesFieldBuilder() : null; + } else { + datanodesBuilder_.addAllMessages(other.datanodes_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getDatanodesCount(); i++) { + if (!getDatanodes(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addDatanodes(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .DatanodeIDProto datanodes = 1; + private java.util.List datanodes_ = + java.util.Collections.emptyList(); + private void ensureDatanodesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + datanodes_ = new java.util.ArrayList(datanodes_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodesBuilder_; + + public java.util.List getDatanodesList() { + if (datanodesBuilder_ == null) { + return java.util.Collections.unmodifiableList(datanodes_); + } else { + return datanodesBuilder_.getMessageList(); + } + } + public int getDatanodesCount() { + if (datanodesBuilder_ == null) { + return datanodes_.size(); + } else { + return datanodesBuilder_.getCount(); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodes(int index) { + if (datanodesBuilder_ == null) { + return datanodes_.get(index); + } else { + return datanodesBuilder_.getMessage(index); + } + } + public Builder setDatanodes( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodesIsMutable(); + datanodes_.set(index, value); + onChanged(); + } else { + 
datanodesBuilder_.setMessage(index, value); + } + return this; + } + public Builder setDatanodes( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodesBuilder_ == null) { + ensureDatanodesIsMutable(); + datanodes_.set(index, builderForValue.build()); + onChanged(); + } else { + datanodesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodesIsMutable(); + datanodes_.add(value); + onChanged(); + } else { + datanodesBuilder_.addMessage(value); + } + return this; + } + public Builder addDatanodes( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodesIsMutable(); + datanodes_.add(index, value); + onChanged(); + } else { + datanodesBuilder_.addMessage(index, value); + } + return this; + } + public Builder addDatanodes( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodesBuilder_ == null) { + ensureDatanodesIsMutable(); + datanodes_.add(builderForValue.build()); + onChanged(); + } else { + datanodesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addDatanodes( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodesBuilder_ == null) { + ensureDatanodesIsMutable(); + datanodes_.add(index, builderForValue.build()); + onChanged(); + } else { + datanodesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllDatanodes( + java.lang.Iterable values) { + if (datanodesBuilder_ == null) { + ensureDatanodesIsMutable(); + super.addAll(values, datanodes_); + onChanged(); + } else { + datanodesBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearDatanodes() { + if (datanodesBuilder_ == null) { + datanodes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + datanodesBuilder_.clear(); + } + return this; + } + public Builder removeDatanodes(int index) { + if (datanodesBuilder_ == null) { + ensureDatanodesIsMutable(); + datanodes_.remove(index); + onChanged(); + } else { + datanodesBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodesBuilder( + int index) { + return getDatanodesFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodesOrBuilder( + int index) { + if (datanodesBuilder_ == null) { + return datanodes_.get(index); } else { + return datanodesBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getDatanodesOrBuilderList() { + if (datanodesBuilder_ != null) { + return datanodesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(datanodes_); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodesBuilder() { + return getDatanodesFieldBuilder().addBuilder( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodesBuilder( + int index) { + return getDatanodesFieldBuilder().addBuilder( + index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); + } + public java.util.List + getDatanodesBuilderList() { + return getDatanodesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> + getDatanodesFieldBuilder() { + if (datanodesBuilder_ == null) { + datanodesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( + datanodes_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + datanodes_ = null; + } + return datanodesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:DatanodeIDsProto) + } + + static { + defaultInstance = new DatanodeIDsProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DatanodeIDsProto) + } + public interface DatanodeInfoProtoOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -10268,6 +10922,9214 @@ public Builder clearUpgradeStatus() { // @@protoc_insertion_point(class_scope:UpgradeStatusReportProto) } + public interface StorageInfoProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 layoutVersion = 1; + boolean hasLayoutVersion(); + int getLayoutVersion(); + + // required uint32 namespceID = 2; + boolean hasNamespceID(); + int getNamespceID(); + + // required string clusterID = 3; + boolean hasClusterID(); + String getClusterID(); + + // required uint64 cTime = 4; + boolean hasCTime(); + long getCTime(); + } + public static final class StorageInfoProto extends + com.google.protobuf.GeneratedMessage + implements StorageInfoProtoOrBuilder { + // Use StorageInfoProto.newBuilder() to construct. 
+ private StorageInfoProto(Builder builder) { + super(builder); + } + private StorageInfoProto(boolean noInit) {} + + private static final StorageInfoProto defaultInstance; + public static StorageInfoProto getDefaultInstance() { + return defaultInstance; + } + + public StorageInfoProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint32 layoutVersion = 1; + public static final int LAYOUTVERSION_FIELD_NUMBER = 1; + private int layoutVersion_; + public boolean hasLayoutVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getLayoutVersion() { + return layoutVersion_; + } + + // required uint32 namespceID = 2; + public static final int NAMESPCEID_FIELD_NUMBER = 2; + private int namespceID_; + public boolean hasNamespceID() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getNamespceID() { + return namespceID_; + } + + // required string clusterID = 3; + public static final int CLUSTERID_FIELD_NUMBER = 3; + private java.lang.Object clusterID_; + public boolean hasClusterID() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getClusterID() { + java.lang.Object ref = clusterID_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + clusterID_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getClusterIDBytes() { + java.lang.Object ref = clusterID_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + clusterID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 cTime = 4; + public static final int CTIME_FIELD_NUMBER = 4; + private long cTime_; + public boolean hasCTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public long getCTime() { + return cTime_; + } + + private void initFields() { + layoutVersion_ = 0; + namespceID_ = 0; + clusterID_ = ""; + cTime_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasLayoutVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNamespceID()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasClusterID()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCTime()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, layoutVersion_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, namespceID_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getClusterIDBytes()); + } + if (((bitField0_ & 
0x00000008) == 0x00000008)) { + output.writeUInt64(4, cTime_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, layoutVersion_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, namespceID_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getClusterIDBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, cTime_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) obj; + + boolean result = true; + result = result && (hasLayoutVersion() == other.hasLayoutVersion()); + if (hasLayoutVersion()) { + result = result && (getLayoutVersion() + == other.getLayoutVersion()); + } + result = result && (hasNamespceID() == other.hasNamespceID()); + if (hasNamespceID()) { + result = result && (getNamespceID() + == other.getNamespceID()); + } + result = result && (hasClusterID() == other.hasClusterID()); + if (hasClusterID()) { + result = result && getClusterID() + .equals(other.getClusterID()); + } + result = result && (hasCTime() == other.hasCTime()); + if (hasCTime()) { + result = result && (getCTime() + == other.getCTime()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLayoutVersion()) { + hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER; + hash = (53 * hash) + getLayoutVersion(); + } + if (hasNamespceID()) { + hash = (37 * hash) + NAMESPCEID_FIELD_NUMBER; + hash = (53 * hash) + getNamespceID(); + } + if (hasClusterID()) { + hash = (37 * hash) + CLUSTERID_FIELD_NUMBER; + hash = (53 * hash) + getClusterID().hashCode(); + } + if (hasCTime()) { + hash = (37 * hash) + CTIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCTime()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + 
public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_StorageInfoProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder() + private Builder() { + 
maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + layoutVersion_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + namespceID_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + clusterID_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + cTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.layoutVersion_ = layoutVersion_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.namespceID_ = namespceID_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.clusterID_ = clusterID_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.cTime_ = cTime_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) return this; + if (other.hasLayoutVersion()) { + setLayoutVersion(other.getLayoutVersion()); + } + if (other.hasNamespceID()) { + setNamespceID(other.getNamespceID()); + } + if (other.hasClusterID()) { + setClusterID(other.getClusterID()); + } + if (other.hasCTime()) { + setCTime(other.getCTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + 
return this; + } + + public final boolean isInitialized() { + if (!hasLayoutVersion()) { + + return false; + } + if (!hasNamespceID()) { + + return false; + } + if (!hasClusterID()) { + + return false; + } + if (!hasCTime()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + layoutVersion_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + namespceID_ = input.readUInt32(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + clusterID_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + cTime_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint32 layoutVersion = 1; + private int layoutVersion_ ; + public boolean hasLayoutVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getLayoutVersion() { + return layoutVersion_; + } + public Builder setLayoutVersion(int value) { + bitField0_ |= 0x00000001; + layoutVersion_ = value; + onChanged(); + return this; + } + public Builder clearLayoutVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + layoutVersion_ = 0; + onChanged(); + return this; + } + + // required uint32 namespceID = 2; + private int namespceID_ ; + public boolean hasNamespceID() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getNamespceID() { + return namespceID_; + } + public Builder setNamespceID(int value) { + bitField0_ |= 0x00000002; + namespceID_ = value; + onChanged(); + return this; + } + public Builder clearNamespceID() { + bitField0_ = (bitField0_ & ~0x00000002); + namespceID_ = 0; + onChanged(); + return this; + } + + // required string clusterID = 3; + private java.lang.Object clusterID_ = ""; + public boolean hasClusterID() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getClusterID() { + java.lang.Object ref = clusterID_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + clusterID_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setClusterID(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + clusterID_ = value; + onChanged(); + return this; + } + public Builder clearClusterID() { + bitField0_ = (bitField0_ & ~0x00000004); + clusterID_ = getDefaultInstance().getClusterID(); + onChanged(); + return this; + } + void setClusterID(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + clusterID_ = value; + onChanged(); + } + + // required uint64 cTime = 4; + private long cTime_ ; + public boolean hasCTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public long getCTime() { + return cTime_; + } + public Builder setCTime(long value) { + bitField0_ |= 0x00000008; + cTime_ = value; + onChanged(); + return this; + } + public Builder clearCTime() { + bitField0_ = 
(bitField0_ & ~0x00000008); + cTime_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StorageInfoProto) + } + + static { + defaultInstance = new StorageInfoProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StorageInfoProto) + } + + public interface NamenodeRegistrationProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string rpcAddress = 1; + boolean hasRpcAddress(); + String getRpcAddress(); + + // required string httpAddress = 2; + boolean hasHttpAddress(); + String getHttpAddress(); + + // required .StorageInfoProto storageInfo = 3; + boolean hasStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); + + // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; + boolean hasRole(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole(); + } + public static final class NamenodeRegistrationProto extends + com.google.protobuf.GeneratedMessage + implements NamenodeRegistrationProtoOrBuilder { + // Use NamenodeRegistrationProto.newBuilder() to construct. + private NamenodeRegistrationProto(Builder builder) { + super(builder); + } + private NamenodeRegistrationProto(boolean noInit) {} + + private static final NamenodeRegistrationProto defaultInstance; + public static NamenodeRegistrationProto getDefaultInstance() { + return defaultInstance; + } + + public NamenodeRegistrationProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_fieldAccessorTable; + } + + public enum NamenodeRoleProto + implements com.google.protobuf.ProtocolMessageEnum { + NAMENODE(0, 1), + BACKUP(1, 2), + CHECKPOINT(2, 3), + ; + + public static final int NAMENODE_VALUE = 1; + public static final int BACKUP_VALUE = 2; + public static final int CHECKPOINT_VALUE = 3; + + + public final int getNumber() { return value; } + + public static NamenodeRoleProto valueOf(int value) { + switch (value) { + case 1: return NAMENODE; + case 2: return BACKUP; + case 3: return CHECKPOINT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public NamenodeRoleProto findValueByNumber(int number) { + return NamenodeRoleProto.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor().getEnumTypes().get(0); + } + + private static final NamenodeRoleProto[] VALUES = { + NAMENODE, BACKUP, 
CHECKPOINT, + }; + + public static NamenodeRoleProto valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private NamenodeRoleProto(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:NamenodeRegistrationProto.NamenodeRoleProto) + } + + private int bitField0_; + // required string rpcAddress = 1; + public static final int RPCADDRESS_FIELD_NUMBER = 1; + private java.lang.Object rpcAddress_; + public boolean hasRpcAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getRpcAddress() { + java.lang.Object ref = rpcAddress_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + rpcAddress_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getRpcAddressBytes() { + java.lang.Object ref = rpcAddress_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + rpcAddress_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string httpAddress = 2; + public static final int HTTPADDRESS_FIELD_NUMBER = 2; + private java.lang.Object httpAddress_; + public boolean hasHttpAddress() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHttpAddress() { + java.lang.Object ref = httpAddress_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + httpAddress_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHttpAddressBytes() { + java.lang.Object ref = httpAddress_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + httpAddress_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .StorageInfoProto storageInfo = 3; + public static final int STORAGEINFO_FIELD_NUMBER = 3; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + return storageInfo_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + return storageInfo_; + } + + // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; + public static final int ROLE_FIELD_NUMBER = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_; + public boolean hasRole() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { + return role_; + } + + private void initFields() { + rpcAddress_ = ""; + httpAddress_ = ""; + storageInfo_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRpcAddress()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasHttpAddress()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStorageInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getStorageInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRpcAddressBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getHttpAddressBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, storageInfo_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, role_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRpcAddressBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getHttpAddressBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, storageInfo_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, role_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) obj; + + boolean result = true; + result = result && (hasRpcAddress() == other.hasRpcAddress()); + if (hasRpcAddress()) { + result = result && getRpcAddress() + .equals(other.getRpcAddress()); + } + result = result && (hasHttpAddress() == other.hasHttpAddress()); + if (hasHttpAddress()) { + result = result && getHttpAddress() + .equals(other.getHttpAddress()); + } + result = result && (hasStorageInfo() == other.hasStorageInfo()); + if (hasStorageInfo()) { + result = result && getStorageInfo() + .equals(other.getStorageInfo()); + } + result = result && (hasRole() == other.hasRole()); + if (hasRole()) { + result = result && + (getRole() == other.getRole()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + 
@java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRpcAddress()) { + hash = (37 * hash) + RPCADDRESS_FIELD_NUMBER; + hash = (53 * hash) + getRpcAddress().hashCode(); + } + if (hasHttpAddress()) { + hash = (37 * hash) + HTTPADDRESS_FIELD_NUMBER; + hash = (53 * hash) + getHttpAddress().hashCode(); + } + if (hasStorageInfo()) { + hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; + hash = (53 * hash) + getStorageInfo().hashCode(); + } + if (hasRole()) { + hash = (37 * hash) + ROLE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getRole()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeRegistrationProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStorageInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rpcAddress_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + httpAddress_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + 
} + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rpcAddress_ = rpcAddress_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.httpAddress_ = httpAddress_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (storageInfoBuilder_ == null) { + result.storageInfo_ = storageInfo_; + } else { + result.storageInfo_ = storageInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.role_ = role_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) return this; + if (other.hasRpcAddress()) { + setRpcAddress(other.getRpcAddress()); + } + if (other.hasHttpAddress()) { + setHttpAddress(other.getHttpAddress()); + } + if (other.hasStorageInfo()) { + mergeStorageInfo(other.getStorageInfo()); + } + if (other.hasRole()) { + setRole(other.getRole()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRpcAddress()) { + + return false; + } + if (!hasHttpAddress()) { + + return false; + } + if (!hasStorageInfo()) { + + return false; + } + if (!getStorageInfo().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rpcAddress_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + httpAddress_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); + if (hasStorageInfo()) { + subBuilder.mergeFrom(getStorageInfo()); + } + input.readMessage(subBuilder, extensionRegistry); + setStorageInfo(subBuilder.buildPartial()); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value 
= org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + role_ = value; + } + break; + } + } + } + } + + private int bitField0_; + + // required string rpcAddress = 1; + private java.lang.Object rpcAddress_ = ""; + public boolean hasRpcAddress() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getRpcAddress() { + java.lang.Object ref = rpcAddress_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + rpcAddress_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setRpcAddress(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rpcAddress_ = value; + onChanged(); + return this; + } + public Builder clearRpcAddress() { + bitField0_ = (bitField0_ & ~0x00000001); + rpcAddress_ = getDefaultInstance().getRpcAddress(); + onChanged(); + return this; + } + void setRpcAddress(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + rpcAddress_ = value; + onChanged(); + } + + // required string httpAddress = 2; + private java.lang.Object httpAddress_ = ""; + public boolean hasHttpAddress() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getHttpAddress() { + java.lang.Object ref = httpAddress_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + httpAddress_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setHttpAddress(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + httpAddress_ = value; + onChanged(); + return this; + } + public Builder clearHttpAddress() { + bitField0_ = (bitField0_ & ~0x00000002); + httpAddress_ = getDefaultInstance().getHttpAddress(); + onChanged(); + return this; + } + void setHttpAddress(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + httpAddress_ = value; + onChanged(); + } + + // required .StorageInfoProto storageInfo = 3; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + if (storageInfoBuilder_ == null) { + return storageInfo_; + } else { + return storageInfoBuilder_.getMessage(); + } + } + public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageInfo_ = value; + onChanged(); + } else { + storageInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder setStorageInfo( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { + if (storageInfoBuilder_ == null) { + storageInfo_ = builderForValue.build(); + onChanged(); + } else { + 
storageInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { + storageInfo_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); + } else { + storageInfo_ = value; + } + onChanged(); + } else { + storageInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearStorageInfo() { + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + onChanged(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getStorageInfoFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + if (storageInfoBuilder_ != null) { + return storageInfoBuilder_.getMessageOrBuilder(); + } else { + return storageInfo_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> + getStorageInfoFieldBuilder() { + if (storageInfoBuilder_ == null) { + storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( + storageInfo_, + getParentForChildren(), + isClean()); + storageInfo_ = null; + } + return storageInfoBuilder_; + } + + // optional .NamenodeRegistrationProto.NamenodeRoleProto role = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; + public boolean hasRole() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto getRole() { + return role_; + } + public Builder setRole(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + role_ = value; + onChanged(); + return this; + } + public Builder clearRole() { + bitField0_ = (bitField0_ & ~0x00000008); + role_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto.NAMENODE; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:NamenodeRegistrationProto) + } + + static { + defaultInstance = new NamenodeRegistrationProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:NamenodeRegistrationProto) + } + + public interface CheckpointSignatureProtoOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required string blockPoolId = 1; + boolean hasBlockPoolId(); + String getBlockPoolId(); + + // required uint64 mostRecentCheckpointTxId = 2; + boolean hasMostRecentCheckpointTxId(); + long getMostRecentCheckpointTxId(); + + // required uint64 curSegmentTxId = 3; + boolean hasCurSegmentTxId(); + long getCurSegmentTxId(); + + // required .StorageInfoProto storageInfo = 4; + boolean hasStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); + } + public static final class CheckpointSignatureProto extends + com.google.protobuf.GeneratedMessage + implements CheckpointSignatureProtoOrBuilder { + // Use CheckpointSignatureProto.newBuilder() to construct. + private CheckpointSignatureProto(Builder builder) { + super(builder); + } + private CheckpointSignatureProto(boolean noInit) {} + + private static final CheckpointSignatureProto defaultInstance; + public static CheckpointSignatureProto getDefaultInstance() { + return defaultInstance; + } + + public CheckpointSignatureProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_fieldAccessorTable; + } + + private int bitField0_; + // required string blockPoolId = 1; + public static final int BLOCKPOOLID_FIELD_NUMBER = 1; + private java.lang.Object blockPoolId_; + public boolean hasBlockPoolId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getBlockPoolId() { + java.lang.Object ref = blockPoolId_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + blockPoolId_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getBlockPoolIdBytes() { + java.lang.Object ref = blockPoolId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + blockPoolId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 mostRecentCheckpointTxId = 2; + public static final int MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER = 2; + private long mostRecentCheckpointTxId_; + public boolean hasMostRecentCheckpointTxId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getMostRecentCheckpointTxId() { + return mostRecentCheckpointTxId_; + } + + // required uint64 curSegmentTxId = 3; + public static final int CURSEGMENTTXID_FIELD_NUMBER = 3; + private long curSegmentTxId_; + public boolean hasCurSegmentTxId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getCurSegmentTxId() { + return curSegmentTxId_; + } + + // required .StorageInfoProto storageInfo = 4; + public static final int STORAGEINFO_FIELD_NUMBER = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + return storageInfo_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + return storageInfo_; + } + + private void initFields() { + blockPoolId_ = ""; + mostRecentCheckpointTxId_ = 0L; + curSegmentTxId_ = 0L; + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlockPoolId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMostRecentCheckpointTxId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCurSegmentTxId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStorageInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getStorageInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBlockPoolIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, mostRecentCheckpointTxId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, curSegmentTxId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, storageInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBlockPoolIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, mostRecentCheckpointTxId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, curSegmentTxId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, storageInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) obj; + + boolean result = true; + result = result && (hasBlockPoolId() == other.hasBlockPoolId()); + if (hasBlockPoolId()) { + result = result && getBlockPoolId() + .equals(other.getBlockPoolId()); + } + result = result && (hasMostRecentCheckpointTxId() == other.hasMostRecentCheckpointTxId()); + if (hasMostRecentCheckpointTxId()) { + result = result && (getMostRecentCheckpointTxId() + == 
other.getMostRecentCheckpointTxId()); + } + result = result && (hasCurSegmentTxId() == other.hasCurSegmentTxId()); + if (hasCurSegmentTxId()) { + result = result && (getCurSegmentTxId() + == other.getCurSegmentTxId()); + } + result = result && (hasStorageInfo() == other.hasStorageInfo()); + if (hasStorageInfo()) { + result = result && getStorageInfo() + .equals(other.getStorageInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlockPoolId()) { + hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; + hash = (53 * hash) + getBlockPoolId().hashCode(); + } + if (hasMostRecentCheckpointTxId()) { + hash = (37 * hash) + MOSTRECENTCHECKPOINTTXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getMostRecentCheckpointTxId()); + } + if (hasCurSegmentTxId()) { + hash = (37 * hash) + CURSEGMENTTXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCurSegmentTxId()); + } + if (hasStorageInfo()) { + hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; + hash = (53 * hash) + getStorageInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, 
extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointSignatureProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStorageInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + blockPoolId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + mostRecentCheckpointTxId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + curSegmentTxId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.blockPoolId_ = blockPoolId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mostRecentCheckpointTxId_ = mostRecentCheckpointTxId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.curSegmentTxId_ = curSegmentTxId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (storageInfoBuilder_ == null) { + result.storageInfo_ = storageInfo_; + } else { + result.storageInfo_ = storageInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) return this; + if (other.hasBlockPoolId()) { + setBlockPoolId(other.getBlockPoolId()); + } + if (other.hasMostRecentCheckpointTxId()) { + setMostRecentCheckpointTxId(other.getMostRecentCheckpointTxId()); + } + if (other.hasCurSegmentTxId()) { + setCurSegmentTxId(other.getCurSegmentTxId()); + } + if (other.hasStorageInfo()) { + mergeStorageInfo(other.getStorageInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlockPoolId()) { + + return false; + } + if (!hasMostRecentCheckpointTxId()) { + + return false; + } + if (!hasCurSegmentTxId()) { + + return false; + } + if (!hasStorageInfo()) { + + return false; + } + if (!getStorageInfo().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 
0x00000001; + blockPoolId_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mostRecentCheckpointTxId_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + curSegmentTxId_ = input.readUInt64(); + break; + } + case 34: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); + if (hasStorageInfo()) { + subBuilder.mergeFrom(getStorageInfo()); + } + input.readMessage(subBuilder, extensionRegistry); + setStorageInfo(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required string blockPoolId = 1; + private java.lang.Object blockPoolId_ = ""; + public boolean hasBlockPoolId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getBlockPoolId() { + java.lang.Object ref = blockPoolId_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + blockPoolId_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setBlockPoolId(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + blockPoolId_ = value; + onChanged(); + return this; + } + public Builder clearBlockPoolId() { + bitField0_ = (bitField0_ & ~0x00000001); + blockPoolId_ = getDefaultInstance().getBlockPoolId(); + onChanged(); + return this; + } + void setBlockPoolId(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + blockPoolId_ = value; + onChanged(); + } + + // required uint64 mostRecentCheckpointTxId = 2; + private long mostRecentCheckpointTxId_ ; + public boolean hasMostRecentCheckpointTxId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getMostRecentCheckpointTxId() { + return mostRecentCheckpointTxId_; + } + public Builder setMostRecentCheckpointTxId(long value) { + bitField0_ |= 0x00000002; + mostRecentCheckpointTxId_ = value; + onChanged(); + return this; + } + public Builder clearMostRecentCheckpointTxId() { + bitField0_ = (bitField0_ & ~0x00000002); + mostRecentCheckpointTxId_ = 0L; + onChanged(); + return this; + } + + // required uint64 curSegmentTxId = 3; + private long curSegmentTxId_ ; + public boolean hasCurSegmentTxId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getCurSegmentTxId() { + return curSegmentTxId_; + } + public Builder setCurSegmentTxId(long value) { + bitField0_ |= 0x00000004; + curSegmentTxId_ = value; + onChanged(); + return this; + } + public Builder clearCurSegmentTxId() { + bitField0_ = (bitField0_ & ~0x00000004); + curSegmentTxId_ = 0L; + onChanged(); + return this; + } + + // required .StorageInfoProto storageInfo = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + if (storageInfoBuilder_ == null) { + return storageInfo_; + } else { + return storageInfoBuilder_.getMessage(); + } + } + public 
Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageInfo_ = value; + onChanged(); + } else { + storageInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder setStorageInfo( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { + if (storageInfoBuilder_ == null) { + storageInfo_ = builderForValue.build(); + onChanged(); + } else { + storageInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { + storageInfo_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); + } else { + storageInfo_ = value; + } + onChanged(); + } else { + storageInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder clearStorageInfo() { + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + onChanged(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getStorageInfoFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + if (storageInfoBuilder_ != null) { + return storageInfoBuilder_.getMessageOrBuilder(); + } else { + return storageInfo_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> + getStorageInfoFieldBuilder() { + if (storageInfoBuilder_ == null) { + storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( + storageInfo_, + getParentForChildren(), + isClean()); + storageInfo_ = null; + } + return storageInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:CheckpointSignatureProto) + } + + static { + defaultInstance = new CheckpointSignatureProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CheckpointSignatureProto) + } + + public interface NamenodeCommandProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 action = 1; + boolean hasAction(); + int getAction(); + + // required .NamenodeCommandProto.Type type = 2; + boolean hasType(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType(); + + // optional .CheckpointCommandProto checkpointCmd = 3; + boolean hasCheckpointCmd(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto 
getCheckpointCmd(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder(); + } + public static final class NamenodeCommandProto extends + com.google.protobuf.GeneratedMessage + implements NamenodeCommandProtoOrBuilder { + // Use NamenodeCommandProto.newBuilder() to construct. + private NamenodeCommandProto(Builder builder) { + super(builder); + } + private NamenodeCommandProto(boolean noInit) {} + + private static final NamenodeCommandProto defaultInstance; + public static NamenodeCommandProto getDefaultInstance() { + return defaultInstance; + } + + public NamenodeCommandProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeCommandProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeCommandProto_fieldAccessorTable; + } + + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + NamenodeCommand(0, 0), + CheckPointCommand(1, 1), + ; + + public static final int NamenodeCommand_VALUE = 0; + public static final int CheckPointCommand_VALUE = 1; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 0: return NamenodeCommand; + case 1: return CheckPointCommand; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = { + NamenodeCommand, CheckPointCommand, + }; + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:NamenodeCommandProto.Type) + } + + private int bitField0_; + // required uint32 action = 1; + public static final int ACTION_FIELD_NUMBER = 1; + private int action_; + public boolean hasAction() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getAction() { + return action_; + } + + // required .NamenodeCommandProto.Type type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type type_; + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType() { + return type_; + } + + // optional .CheckpointCommandProto checkpointCmd = 3; + public static final int CHECKPOINTCMD_FIELD_NUMBER = 3; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto checkpointCmd_; + public boolean hasCheckpointCmd() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd() { + return checkpointCmd_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() { + return checkpointCmd_; + } + + private void initFields() { + action_ = 0; + type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; + checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAction()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (hasCheckpointCmd()) { + if (!getCheckpointCmd().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, action_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, checkpointCmd_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, action_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, checkpointCmd_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto) obj; + + boolean result = true; + result = result && (hasAction() == other.hasAction()); + if (hasAction()) { + result = result && (getAction() + == other.getAction()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasCheckpointCmd() == 
other.hasCheckpointCmd()); + if (hasCheckpointCmd()) { + result = result && getCheckpointCmd() + .equals(other.getCheckpointCmd()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAction()) { + hash = (37 * hash) + ACTION_FIELD_NUMBER; + hash = (53 * hash) + getAction(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasCheckpointCmd()) { + hash = (37 * hash) + CHECKPOINTCMD_FIELD_NUMBER; + hash = (53 * hash) + getCheckpointCmd().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeCommandProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamenodeCommandProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCheckpointCmdFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + action_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; + bitField0_ = (bitField0_ & ~0x00000002); + if (checkpointCmdBuilder_ == null) { + checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); + } else { + checkpointCmdBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto buildPartial() 
{ + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.action_ = action_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (checkpointCmdBuilder_ == null) { + result.checkpointCmd_ = checkpointCmd_; + } else { + result.checkpointCmd_ = checkpointCmdBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance()) return this; + if (other.hasAction()) { + setAction(other.getAction()); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasCheckpointCmd()) { + mergeCheckpointCmd(other.getCheckpointCmd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAction()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (hasCheckpointCmd()) { + if (!getCheckpointCmd().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + action_ = input.readUInt32(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + type_ = value; + } + break; + } + case 26: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.newBuilder(); + if (hasCheckpointCmd()) { + subBuilder.mergeFrom(getCheckpointCmd()); + } + input.readMessage(subBuilder, extensionRegistry); + setCheckpointCmd(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required uint32 action = 1; + private int action_ ; + public boolean hasAction() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getAction() { + return 
action_; + } + public Builder setAction(int value) { + bitField0_ |= 0x00000001; + action_ = value; + onChanged(); + return this; + } + public Builder clearAction() { + bitField0_ = (bitField0_ & ~0x00000001); + action_ = 0; + onChanged(); + return this; + } + + // required .NamenodeCommandProto.Type type = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type getType() { + return type_; + } + public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Type.NamenodeCommand; + onChanged(); + return this; + } + + // optional .CheckpointCommandProto checkpointCmd = 3; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto checkpointCmd_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder> checkpointCmdBuilder_; + public boolean hasCheckpointCmd() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getCheckpointCmd() { + if (checkpointCmdBuilder_ == null) { + return checkpointCmd_; + } else { + return checkpointCmdBuilder_.getMessage(); + } + } + public Builder setCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto value) { + if (checkpointCmdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + checkpointCmd_ = value; + onChanged(); + } else { + checkpointCmdBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder setCheckpointCmd( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder builderForValue) { + if (checkpointCmdBuilder_ == null) { + checkpointCmd_ = builderForValue.build(); + onChanged(); + } else { + checkpointCmdBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder mergeCheckpointCmd(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto value) { + if (checkpointCmdBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + checkpointCmd_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance()) { + checkpointCmd_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.newBuilder(checkpointCmd_).mergeFrom(value).buildPartial(); + } else { + checkpointCmd_ = value; + } + onChanged(); + } else { + checkpointCmdBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + public Builder clearCheckpointCmd() { + if (checkpointCmdBuilder_ == null) { + checkpointCmd_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); + onChanged(); + } else { + checkpointCmdBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder getCheckpointCmdBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCheckpointCmdFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder getCheckpointCmdOrBuilder() { + if (checkpointCmdBuilder_ != null) { + return checkpointCmdBuilder_.getMessageOrBuilder(); + } else { + return checkpointCmd_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder> + getCheckpointCmdFieldBuilder() { + if (checkpointCmdBuilder_ == null) { + checkpointCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder>( + checkpointCmd_, + getParentForChildren(), + isClean()); + checkpointCmd_ = null; + } + return checkpointCmdBuilder_; + } + + // @@protoc_insertion_point(builder_scope:NamenodeCommandProto) + } + + static { + defaultInstance = new NamenodeCommandProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:NamenodeCommandProto) + } + + public interface CheckpointCommandProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .CheckpointSignatureProto signature = 1; + boolean hasSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); + + // required bool needToReturnImage = 2; + boolean hasNeedToReturnImage(); + boolean getNeedToReturnImage(); + } + public static final class CheckpointCommandProto extends + com.google.protobuf.GeneratedMessage + implements CheckpointCommandProtoOrBuilder { + // Use CheckpointCommandProto.newBuilder() to construct. 
+ private CheckpointCommandProto(Builder builder) { + super(builder); + } + private CheckpointCommandProto(boolean noInit) {} + + private static final CheckpointCommandProto defaultInstance; + public static CheckpointCommandProto getDefaultInstance() { + return defaultInstance; + } + + public CheckpointCommandProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointCommandProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointCommandProto_fieldAccessorTable; + } + + private int bitField0_; + // required .CheckpointSignatureProto signature = 1; + public static final int SIGNATURE_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + return signature_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + return signature_; + } + + // required bool needToReturnImage = 2; + public static final int NEEDTORETURNIMAGE_FIELD_NUMBER = 2; + private boolean needToReturnImage_; + public boolean hasNeedToReturnImage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public boolean getNeedToReturnImage() { + return needToReturnImage_; + } + + private void initFields() { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + needToReturnImage_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSignature()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNeedToReturnImage()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSignature().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, signature_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, needToReturnImage_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, signature_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, needToReturnImage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if 
(obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto) obj; + + boolean result = true; + result = result && (hasSignature() == other.hasSignature()); + if (hasSignature()) { + result = result && getSignature() + .equals(other.getSignature()); + } + result = result && (hasNeedToReturnImage() == other.hasNeedToReturnImage()); + if (hasNeedToReturnImage()) { + result = result && (getNeedToReturnImage() + == other.getNeedToReturnImage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSignature()) { + hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; + hash = (53 * hash) + getSignature().hashCode(); + } + if (hasNeedToReturnImage()) { + hash = (37 * hash) + NEEDTORETURNIMAGE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getNeedToReturnImage()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if 
(builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointCommandProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CheckpointCommandProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSignatureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } else { + signatureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + needToReturnImage_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (signatureBuilder_ == null) { + result.signature_ = signature_; + } else { + result.signature_ = signatureBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.needToReturnImage_ = needToReturnImage_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.getDefaultInstance()) return this; + if (other.hasSignature()) { + mergeSignature(other.getSignature()); + } + if (other.hasNeedToReturnImage()) { + setNeedToReturnImage(other.getNeedToReturnImage()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSignature()) { + + return false; + } + if (!hasNeedToReturnImage()) { + + return false; + } + if (!getSignature().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(); + if (hasSignature()) { + subBuilder.mergeFrom(getSignature()); + } + input.readMessage(subBuilder, extensionRegistry); + setSignature(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + needToReturnImage_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required .CheckpointSignatureProto signature = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + if (signatureBuilder_ == null) { + return signature_; + } else { + return signatureBuilder_.getMessage(); + } + } + public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + signature_ = value; + onChanged(); + } else { + signatureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setSignature( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder builderForValue) { + if (signatureBuilder_ == null) { + signature_ = builderForValue.build(); + onChanged(); + } else { + signatureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) { + signature_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); + } else { + signature_ = value; + } + onChanged(); + } else { + signatureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearSignature() { + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + onChanged(); + } else { + signatureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSignatureFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + if (signatureBuilder_ != null) { + return signatureBuilder_.getMessageOrBuilder(); + } else { + return signature_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> + getSignatureFieldBuilder() { + if (signatureBuilder_ == null) { + signatureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder>( + signature_, + getParentForChildren(), + isClean()); + signature_ = null; + } + return 
signatureBuilder_; + } + + // required bool needToReturnImage = 2; + private boolean needToReturnImage_ ; + public boolean hasNeedToReturnImage() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public boolean getNeedToReturnImage() { + return needToReturnImage_; + } + public Builder setNeedToReturnImage(boolean value) { + bitField0_ |= 0x00000002; + needToReturnImage_ = value; + onChanged(); + return this; + } + public Builder clearNeedToReturnImage() { + bitField0_ = (bitField0_ & ~0x00000002); + needToReturnImage_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:CheckpointCommandProto) + } + + static { + defaultInstance = new CheckpointCommandProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CheckpointCommandProto) + } + + public interface BlockProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 blockId = 1; + boolean hasBlockId(); + long getBlockId(); + + // required uint64 genStamp = 2; + boolean hasGenStamp(); + long getGenStamp(); + + // optional uint64 numBytes = 3; + boolean hasNumBytes(); + long getNumBytes(); + } + public static final class BlockProto extends + com.google.protobuf.GeneratedMessage + implements BlockProtoOrBuilder { + // Use BlockProto.newBuilder() to construct. + private BlockProto(Builder builder) { + super(builder); + } + private BlockProto(boolean noInit) {} + + private static final BlockProto defaultInstance; + public static BlockProto getDefaultInstance() { + return defaultInstance; + } + + public BlockProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 blockId = 1; + public static final int BLOCKID_FIELD_NUMBER = 1; + private long blockId_; + public boolean hasBlockId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getBlockId() { + return blockId_; + } + + // required uint64 genStamp = 2; + public static final int GENSTAMP_FIELD_NUMBER = 2; + private long genStamp_; + public boolean hasGenStamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getGenStamp() { + return genStamp_; + } + + // optional uint64 numBytes = 3; + public static final int NUMBYTES_FIELD_NUMBER = 3; + private long numBytes_; + public boolean hasNumBytes() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getNumBytes() { + return numBytes_; + } + + private void initFields() { + blockId_ = 0L; + genStamp_ = 0L; + numBytes_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlockId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasGenStamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, blockId_); + } + if (((bitField0_ 
& 0x00000002) == 0x00000002)) { + output.writeUInt64(2, genStamp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, numBytes_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, blockId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, genStamp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, numBytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) obj; + + boolean result = true; + result = result && (hasBlockId() == other.hasBlockId()); + if (hasBlockId()) { + result = result && (getBlockId() + == other.getBlockId()); + } + result = result && (hasGenStamp() == other.hasGenStamp()); + if (hasGenStamp()) { + result = result && (getGenStamp() + == other.getGenStamp()); + } + result = result && (hasNumBytes() == other.hasNumBytes()); + if (hasNumBytes()) { + result = result && (getNumBytes() + == other.getNumBytes()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlockId()) { + hash = (37 * hash) + BLOCKID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBlockId()); + } + if (hasGenStamp()) { + hash = (37 * hash) + GENSTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getGenStamp()); + } + if (hasNumBytes()) { + hash = (37 * hash) + NUMBYTES_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNumBytes()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + blockId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + genStamp_ = 0L; + bitField0_ = (bitField0_ & 
~0x00000002); + numBytes_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.blockId_ = blockId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.genStamp_ = genStamp_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.numBytes_ = numBytes_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this; + if (other.hasBlockId()) { + setBlockId(other.getBlockId()); + } + if (other.hasGenStamp()) { + setGenStamp(other.getGenStamp()); + } + if (other.hasNumBytes()) { + setNumBytes(other.getNumBytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlockId()) { + + return false; + } + if (!hasGenStamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + 
blockId_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + genStamp_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + numBytes_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 blockId = 1; + private long blockId_ ; + public boolean hasBlockId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getBlockId() { + return blockId_; + } + public Builder setBlockId(long value) { + bitField0_ |= 0x00000001; + blockId_ = value; + onChanged(); + return this; + } + public Builder clearBlockId() { + bitField0_ = (bitField0_ & ~0x00000001); + blockId_ = 0L; + onChanged(); + return this; + } + + // required uint64 genStamp = 2; + private long genStamp_ ; + public boolean hasGenStamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getGenStamp() { + return genStamp_; + } + public Builder setGenStamp(long value) { + bitField0_ |= 0x00000002; + genStamp_ = value; + onChanged(); + return this; + } + public Builder clearGenStamp() { + bitField0_ = (bitField0_ & ~0x00000002); + genStamp_ = 0L; + onChanged(); + return this; + } + + // optional uint64 numBytes = 3; + private long numBytes_ ; + public boolean hasNumBytes() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getNumBytes() { + return numBytes_; + } + public Builder setNumBytes(long value) { + bitField0_ |= 0x00000004; + numBytes_ = value; + onChanged(); + return this; + } + public Builder clearNumBytes() { + bitField0_ = (bitField0_ & ~0x00000004); + numBytes_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:BlockProto) + } + + static { + defaultInstance = new BlockProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BlockProto) + } + + public interface BlockWithLocationsProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .BlockProto block = 1; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); + + // repeated .DatanodeIDProto datanodeIDs = 2; + java.util.List + getDatanodeIDsList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index); + int getDatanodeIDsCount(); + java.util.List + getDatanodeIDsOrBuilderList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder( + int index); + } + public static final class BlockWithLocationsProto extends + com.google.protobuf.GeneratedMessage + implements BlockWithLocationsProtoOrBuilder { + // Use BlockWithLocationsProto.newBuilder() to construct. 
+ private BlockWithLocationsProto(Builder builder) { + super(builder); + } + private BlockWithLocationsProto(boolean noInit) {} + + private static final BlockWithLocationsProto defaultInstance; + public static BlockWithLocationsProto getDefaultInstance() { + return defaultInstance; + } + + public BlockWithLocationsProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_fieldAccessorTable; + } + + private int bitField0_; + // required .BlockProto block = 1; + public static final int BLOCK_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + // repeated .DatanodeIDProto datanodeIDs = 2; + public static final int DATANODEIDS_FIELD_NUMBER = 2; + private java.util.List datanodeIDs_; + public java.util.List getDatanodeIDsList() { + return datanodeIDs_; + } + public java.util.List + getDatanodeIDsOrBuilderList() { + return datanodeIDs_; + } + public int getDatanodeIDsCount() { + return datanodeIDs_.size(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) { + return datanodeIDs_.get(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder( + int index) { + return datanodeIDs_.get(index); + } + + private void initFields() { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + datanodeIDs_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getDatanodeIDsCount(); i++) { + if (!getDatanodeIDs(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, block_); + } + for (int i = 0; i < datanodeIDs_.size(); i++) { + output.writeMessage(2, datanodeIDs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, block_); + } + for (int i = 0; i < datanodeIDs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, datanodeIDs_.get(i)); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) obj; + + boolean result = true; + result = result && (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && getDatanodeIDsList() + .equals(other.getDatanodeIDsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + if (getDatanodeIDsCount() > 0) { + hash = (37 * hash) + DATANODEIDS_FIELD_NUMBER; + hash = (53 * hash) + getDatanodeIDsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockWithLocationsProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + getDatanodeIDsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (datanodeIDsBuilder_ == null) { + datanodeIDs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + datanodeIDsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); + } + 
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + if (datanodeIDsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + datanodeIDs_ = java.util.Collections.unmodifiableList(datanodeIDs_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.datanodeIDs_ = datanodeIDs_; + } else { + result.datanodeIDs_ = datanodeIDsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()) return this; + if (other.hasBlock()) { + mergeBlock(other.getBlock()); + } + if (datanodeIDsBuilder_ == null) { + if (!other.datanodeIDs_.isEmpty()) { + if (datanodeIDs_.isEmpty()) { + datanodeIDs_ = other.datanodeIDs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureDatanodeIDsIsMutable(); + datanodeIDs_.addAll(other.datanodeIDs_); + } + onChanged(); + } + } else { + if (!other.datanodeIDs_.isEmpty()) { + if (datanodeIDsBuilder_.isEmpty()) { + datanodeIDsBuilder_.dispose(); + datanodeIDsBuilder_ = null; + datanodeIDs_ = other.datanodeIDs_; + bitField0_ = (bitField0_ & ~0x00000002); + datanodeIDsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getDatanodeIDsFieldBuilder() : null; + } else { + datanodeIDsBuilder_.addAllMessages(other.datanodeIDs_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlock()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + for (int i = 0; i < getDatanodeIDsCount(); i++) { + if (!getDatanodeIDs(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + case 18: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addDatanodeIDs(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .BlockProto block = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { + block_ = + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; + } + return blockBuilder_; + } + + // repeated .DatanodeIDProto datanodeIDs = 2; + private java.util.List datanodeIDs_ = + java.util.Collections.emptyList(); + private void ensureDatanodeIDsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + datanodeIDs_ = new java.util.ArrayList(datanodeIDs_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDsBuilder_; + + public java.util.List getDatanodeIDsList() { + if (datanodeIDsBuilder_ == null) { + return java.util.Collections.unmodifiableList(datanodeIDs_); + } else { + return datanodeIDsBuilder_.getMessageList(); + } + } + public int getDatanodeIDsCount() { + if (datanodeIDsBuilder_ == null) { + return datanodeIDs_.size(); + } else { + return datanodeIDsBuilder_.getCount(); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeIDs(int index) { + if (datanodeIDsBuilder_ == null) { + return datanodeIDs_.get(index); + } else { + return datanodeIDsBuilder_.getMessage(index); + } + } + public Builder setDatanodeIDs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodeIDsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodeIDsIsMutable(); + datanodeIDs_.set(index, value); + onChanged(); + } else { + datanodeIDsBuilder_.setMessage(index, value); + } + return this; + } + public Builder setDatanodeIDs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodeIDsBuilder_ == null) { + ensureDatanodeIDsIsMutable(); + datanodeIDs_.set(index, builderForValue.build()); + onChanged(); + } else { + datanodeIDsBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + public Builder addDatanodeIDs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodeIDsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodeIDsIsMutable(); + datanodeIDs_.add(value); + onChanged(); + } else { + datanodeIDsBuilder_.addMessage(value); + } + return this; + } + public Builder addDatanodeIDs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodeIDsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDatanodeIDsIsMutable(); + datanodeIDs_.add(index, value); + onChanged(); + } else { + datanodeIDsBuilder_.addMessage(index, value); + } + return this; + } + public Builder addDatanodeIDs( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodeIDsBuilder_ == null) { + ensureDatanodeIDsIsMutable(); + datanodeIDs_.add(builderForValue.build()); + onChanged(); + } else { + datanodeIDsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addDatanodeIDs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodeIDsBuilder_ == null) { + ensureDatanodeIDsIsMutable(); + datanodeIDs_.add(index, builderForValue.build()); + onChanged(); + } else { + datanodeIDsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllDatanodeIDs( + java.lang.Iterable values) { + if (datanodeIDsBuilder_ == null) { + ensureDatanodeIDsIsMutable(); + super.addAll(values, datanodeIDs_); + onChanged(); + } else { + datanodeIDsBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearDatanodeIDs() { + if (datanodeIDsBuilder_ == null) { + datanodeIDs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + datanodeIDsBuilder_.clear(); + } + return this; + } + public Builder removeDatanodeIDs(int index) { + if (datanodeIDsBuilder_ == null) { + ensureDatanodeIDsIsMutable(); + datanodeIDs_.remove(index); + onChanged(); + } else { + datanodeIDsBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDsBuilder( + int index) { + return getDatanodeIDsFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDsOrBuilder( + int index) { + if (datanodeIDsBuilder_ == null) { + return datanodeIDs_.get(index); } else { + return datanodeIDsBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getDatanodeIDsOrBuilderList() { + if (datanodeIDsBuilder_ != null) { + return datanodeIDsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(datanodeIDs_); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder() { + return getDatanodeIDsFieldBuilder().addBuilder( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addDatanodeIDsBuilder( + int index) { + return getDatanodeIDsFieldBuilder().addBuilder( + index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()); + } + public java.util.List + getDatanodeIDsBuilderList() { + return 
getDatanodeIDsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> + getDatanodeIDsFieldBuilder() { + if (datanodeIDsBuilder_ == null) { + datanodeIDsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( + datanodeIDs_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + datanodeIDs_ = null; + } + return datanodeIDsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:BlockWithLocationsProto) + } + + static { + defaultInstance = new BlockWithLocationsProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BlockWithLocationsProto) + } + + public interface BlocksWithLocationsProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .BlockWithLocationsProto blocks = 1; + java.util.List + getBlocksList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index); + int getBlocksCount(); + java.util.List + getBlocksOrBuilderList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( + int index); + } + public static final class BlocksWithLocationsProto extends + com.google.protobuf.GeneratedMessage + implements BlocksWithLocationsProtoOrBuilder { + // Use BlocksWithLocationsProto.newBuilder() to construct. 
+ private BlocksWithLocationsProto(Builder builder) { + super(builder); + } + private BlocksWithLocationsProto(boolean noInit) {} + + private static final BlocksWithLocationsProto defaultInstance; + public static BlocksWithLocationsProto getDefaultInstance() { + return defaultInstance; + } + + public BlocksWithLocationsProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_fieldAccessorTable; + } + + // repeated .BlockWithLocationsProto blocks = 1; + public static final int BLOCKS_FIELD_NUMBER = 1; + private java.util.List blocks_; + public java.util.List getBlocksList() { + return blocks_; + } + public java.util.List + getBlocksOrBuilderList() { + return blocks_; + } + public int getBlocksCount() { + return blocks_.size(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { + return blocks_.get(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( + int index) { + return blocks_.get(index); + } + + private void initFields() { + blocks_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getBlocksCount(); i++) { + if (!getBlocks(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < blocks_.size(); i++) { + output.writeMessage(1, blocks_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < blocks_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, blocks_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) obj; + + boolean result = true; + result = result && getBlocksList() + .equals(other.getBlocksList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getBlocksCount() > 0) { + hash = (37 * hash) + 
BLOCKS_FIELD_NUMBER; + hash = (53 * hash) + getBlocksList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlocksWithLocationsProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlocksFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blocksBuilder_ == null) { + blocks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + blocksBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto(this); + int from_bitField0_ = bitField0_; + if (blocksBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + blocks_ = java.util.Collections.unmodifiableList(blocks_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.blocks_ = blocks_; + } else { + result.blocks_ = blocksBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto)other); + } 
else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.getDefaultInstance()) return this; + if (blocksBuilder_ == null) { + if (!other.blocks_.isEmpty()) { + if (blocks_.isEmpty()) { + blocks_ = other.blocks_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBlocksIsMutable(); + blocks_.addAll(other.blocks_); + } + onChanged(); + } + } else { + if (!other.blocks_.isEmpty()) { + if (blocksBuilder_.isEmpty()) { + blocksBuilder_.dispose(); + blocksBuilder_ = null; + blocks_ = other.blocks_; + bitField0_ = (bitField0_ & ~0x00000001); + blocksBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getBlocksFieldBuilder() : null; + } else { + blocksBuilder_.addAllMessages(other.blocks_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getBlocksCount(); i++) { + if (!getBlocks(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addBlocks(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .BlockWithLocationsProto blocks = 1; + private java.util.List blocks_ = + java.util.Collections.emptyList(); + private void ensureBlocksIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + blocks_ = new java.util.ArrayList(blocks_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> blocksBuilder_; + + public java.util.List getBlocksList() { + if (blocksBuilder_ == null) { + return java.util.Collections.unmodifiableList(blocks_); + } else { + return blocksBuilder_.getMessageList(); + } + } + public int getBlocksCount() { + if (blocksBuilder_ == null) { + return blocks_.size(); + } else { + return blocksBuilder_.getCount(); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(int index) { + if (blocksBuilder_ == null) { + return blocks_.get(index); + } else { + return blocksBuilder_.getMessage(index); + } + } + public Builder setBlocks( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { + if (blocksBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); + } + ensureBlocksIsMutable(); + blocks_.set(index, value); + onChanged(); + } else { + blocksBuilder_.setMessage(index, value); + } + return this; + } + public Builder setBlocks( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { + if (blocksBuilder_ == null) { + ensureBlocksIsMutable(); + blocks_.set(index, builderForValue.build()); + onChanged(); + } else { + blocksBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { + if (blocksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBlocksIsMutable(); + blocks_.add(value); + onChanged(); + } else { + blocksBuilder_.addMessage(value); + } + return this; + } + public Builder addBlocks( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { + if (blocksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBlocksIsMutable(); + blocks_.add(index, value); + onChanged(); + } else { + blocksBuilder_.addMessage(index, value); + } + return this; + } + public Builder addBlocks( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { + if (blocksBuilder_ == null) { + ensureBlocksIsMutable(); + blocks_.add(builderForValue.build()); + onChanged(); + } else { + blocksBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addBlocks( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { + if (blocksBuilder_ == null) { + ensureBlocksIsMutable(); + blocks_.add(index, builderForValue.build()); + onChanged(); + } else { + blocksBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllBlocks( + java.lang.Iterable values) { + if (blocksBuilder_ == null) { + ensureBlocksIsMutable(); + super.addAll(values, blocks_); + onChanged(); + } else { + blocksBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearBlocks() { + if (blocksBuilder_ == null) { + blocks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + blocksBuilder_.clear(); + } + return this; + } + public Builder removeBlocks(int index) { + if (blocksBuilder_ == null) { + ensureBlocksIsMutable(); + blocks_.remove(index); + onChanged(); + } else { + blocksBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder getBlocksBuilder( + int index) { + return getBlocksFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder( + int index) { + if (blocksBuilder_ == null) { + return blocks_.get(index); } else { + return blocksBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getBlocksOrBuilderList() { + if (blocksBuilder_ != null) { + return blocksBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(blocks_); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder() { + return getBlocksFieldBuilder().addBuilder( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder addBlocksBuilder( + int index) { + return getBlocksFieldBuilder().addBuilder( + index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()); + } + public java.util.List + getBlocksBuilderList() { + return getBlocksFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> + getBlocksFieldBuilder() { + if (blocksBuilder_ == null) { + blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder>( + blocks_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + blocks_ = null; + } + return blocksBuilder_; + } + + // @@protoc_insertion_point(builder_scope:BlocksWithLocationsProto) + } + + static { + defaultInstance = new BlocksWithLocationsProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BlocksWithLocationsProto) + } + + public interface RemoteEditLogProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 startTxId = 1; + boolean hasStartTxId(); + long getStartTxId(); + + // required uint64 endTxId = 2; + boolean hasEndTxId(); + long getEndTxId(); + } + public static final class RemoteEditLogProto extends + com.google.protobuf.GeneratedMessage + implements RemoteEditLogProtoOrBuilder { + // Use RemoteEditLogProto.newBuilder() to construct. 
+ private RemoteEditLogProto(Builder builder) { + super(builder); + } + private RemoteEditLogProto(boolean noInit) {} + + private static final RemoteEditLogProto defaultInstance; + public static RemoteEditLogProto getDefaultInstance() { + return defaultInstance; + } + + public RemoteEditLogProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 startTxId = 1; + public static final int STARTTXID_FIELD_NUMBER = 1; + private long startTxId_; + public boolean hasStartTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getStartTxId() { + return startTxId_; + } + + // required uint64 endTxId = 2; + public static final int ENDTXID_FIELD_NUMBER = 2; + private long endTxId_; + public boolean hasEndTxId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getEndTxId() { + return endTxId_; + } + + private void initFields() { + startTxId_ = 0L; + endTxId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStartTxId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasEndTxId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, startTxId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, endTxId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, startTxId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, endTxId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) obj; + + boolean result = true; + result = result && (hasStartTxId() == other.hasStartTxId()); + if (hasStartTxId()) { + result = result && (getStartTxId() + == other.getStartTxId()); + } + result = result && (hasEndTxId() == other.hasEndTxId()); + if (hasEndTxId()) { + result = result && (getEndTxId() 
+ == other.getEndTxId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasStartTxId()) { + hash = (37 * hash) + STARTTXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTxId()); + } + if (hasEndTxId()) { + hash = (37 * hash) + ENDTXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getEndTxId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { 
return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + startTxId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + endTxId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.startTxId_ = startTxId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.endTxId_ = endTxId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other 
instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()) return this; + if (other.hasStartTxId()) { + setStartTxId(other.getStartTxId()); + } + if (other.hasEndTxId()) { + setEndTxId(other.getEndTxId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStartTxId()) { + + return false; + } + if (!hasEndTxId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + startTxId_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + endTxId_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 startTxId = 1; + private long startTxId_ ; + public boolean hasStartTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getStartTxId() { + return startTxId_; + } + public Builder setStartTxId(long value) { + bitField0_ |= 0x00000001; + startTxId_ = value; + onChanged(); + return this; + } + public Builder clearStartTxId() { + bitField0_ = (bitField0_ & ~0x00000001); + startTxId_ = 0L; + onChanged(); + return this; + } + + // required uint64 endTxId = 2; + private long endTxId_ ; + public boolean hasEndTxId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getEndTxId() { + return endTxId_; + } + public Builder setEndTxId(long value) { + bitField0_ |= 0x00000002; + endTxId_ = value; + onChanged(); + return this; + } + public Builder clearEndTxId() { + bitField0_ = (bitField0_ & ~0x00000002); + endTxId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoteEditLogProto) + } + + static { + defaultInstance = new RemoteEditLogProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoteEditLogProto) + } + + public interface RemoteEditLogManifestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .RemoteEditLogProto logs = 1; + java.util.List + getLogsList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index); + int getLogsCount(); + java.util.List + getLogsOrBuilderList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( + int index); + } + public static final class RemoteEditLogManifestProto extends + com.google.protobuf.GeneratedMessage + implements RemoteEditLogManifestProtoOrBuilder { + // Use RemoteEditLogManifestProto.newBuilder() to construct. 
+ private RemoteEditLogManifestProto(Builder builder) { + super(builder); + } + private RemoteEditLogManifestProto(boolean noInit) {} + + private static final RemoteEditLogManifestProto defaultInstance; + public static RemoteEditLogManifestProto getDefaultInstance() { + return defaultInstance; + } + + public RemoteEditLogManifestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_fieldAccessorTable; + } + + // repeated .RemoteEditLogProto logs = 1; + public static final int LOGS_FIELD_NUMBER = 1; + private java.util.List logs_; + public java.util.List getLogsList() { + return logs_; + } + public java.util.List + getLogsOrBuilderList() { + return logs_; + } + public int getLogsCount() { + return logs_.size(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { + return logs_.get(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( + int index) { + return logs_.get(index); + } + + private void initFields() { + logs_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getLogsCount(); i++) { + if (!getLogs(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < logs_.size(); i++) { + output.writeMessage(1, logs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < logs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, logs_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) obj; + + boolean result = true; + result = result && getLogsList() + .equals(other.getLogsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getLogsCount() > 0) { + hash = (37 * hash) + LOGS_FIELD_NUMBER; + hash = (53 * hash) + 
getLogsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + 
Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RemoteEditLogManifestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLogsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (logsBuilder_ == null) { + logs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + logsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto(this); + int from_bitField0_ = bitField0_; + if (logsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + logs_ = java.util.Collections.unmodifiableList(logs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.logs_ = logs_; + } else { + result.logs_ = logsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto)other); + } else { + super.mergeFrom(other); + return this; + } 
+ } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) return this; + if (logsBuilder_ == null) { + if (!other.logs_.isEmpty()) { + if (logs_.isEmpty()) { + logs_ = other.logs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLogsIsMutable(); + logs_.addAll(other.logs_); + } + onChanged(); + } + } else { + if (!other.logs_.isEmpty()) { + if (logsBuilder_.isEmpty()) { + logsBuilder_.dispose(); + logsBuilder_ = null; + logs_ = other.logs_; + bitField0_ = (bitField0_ & ~0x00000001); + logsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getLogsFieldBuilder() : null; + } else { + logsBuilder_.addAllMessages(other.logs_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getLogsCount(); i++) { + if (!getLogs(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addLogs(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .RemoteEditLogProto logs = 1; + private java.util.List logs_ = + java.util.Collections.emptyList(); + private void ensureLogsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + logs_ = new java.util.ArrayList(logs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> logsBuilder_; + + public java.util.List getLogsList() { + if (logsBuilder_ == null) { + return java.util.Collections.unmodifiableList(logs_); + } else { + return logsBuilder_.getMessageList(); + } + } + public int getLogsCount() { + if (logsBuilder_ == null) { + return logs_.size(); + } else { + return logsBuilder_.getCount(); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto getLogs(int index) { + if (logsBuilder_ == null) { + return logs_.get(index); + } else { + return logsBuilder_.getMessage(index); + } + } + public Builder setLogs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { + if (logsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLogsIsMutable(); + logs_.set(index, value); + onChanged(); + } else { + logsBuilder_.setMessage(index, value); + } + return this; + } + public 
Builder setLogs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { + if (logsBuilder_ == null) { + ensureLogsIsMutable(); + logs_.set(index, builderForValue.build()); + onChanged(); + } else { + logsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addLogs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { + if (logsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLogsIsMutable(); + logs_.add(value); + onChanged(); + } else { + logsBuilder_.addMessage(value); + } + return this; + } + public Builder addLogs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto value) { + if (logsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLogsIsMutable(); + logs_.add(index, value); + onChanged(); + } else { + logsBuilder_.addMessage(index, value); + } + return this; + } + public Builder addLogs( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { + if (logsBuilder_ == null) { + ensureLogsIsMutable(); + logs_.add(builderForValue.build()); + onChanged(); + } else { + logsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addLogs( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder builderForValue) { + if (logsBuilder_ == null) { + ensureLogsIsMutable(); + logs_.add(index, builderForValue.build()); + onChanged(); + } else { + logsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllLogs( + java.lang.Iterable values) { + if (logsBuilder_ == null) { + ensureLogsIsMutable(); + super.addAll(values, logs_); + onChanged(); + } else { + logsBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearLogs() { + if (logsBuilder_ == null) { + logs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + logsBuilder_.clear(); + } + return this; + } + public Builder removeLogs(int index) { + if (logsBuilder_ == null) { + ensureLogsIsMutable(); + logs_.remove(index); + onChanged(); + } else { + logsBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder getLogsBuilder( + int index) { + return getLogsFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder getLogsOrBuilder( + int index) { + if (logsBuilder_ == null) { + return logs_.get(index); } else { + return logsBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getLogsOrBuilderList() { + if (logsBuilder_ != null) { + return logsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(logs_); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder() { + return getLogsFieldBuilder().addBuilder( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder addLogsBuilder( + int index) { + return getLogsFieldBuilder().addBuilder( + index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.getDefaultInstance()); + } + public java.util.List + getLogsBuilderList() { + return getLogsFieldBuilder().getBuilderList(); + } + 
private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder> + getLogsFieldBuilder() { + if (logsBuilder_ == null) { + logsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProtoOrBuilder>( + logs_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + logs_ = null; + } + return logsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RemoteEditLogManifestProto) + } + + static { + defaultInstance = new RemoteEditLogManifestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoteEditLogManifestProto) + } + + public interface NamespaceInfoProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string buildVersion = 1; + boolean hasBuildVersion(); + String getBuildVersion(); + + // required uint32 distUpgradeVersion = 2; + boolean hasDistUpgradeVersion(); + int getDistUpgradeVersion(); + + // required string blockPoolID = 3; + boolean hasBlockPoolID(); + String getBlockPoolID(); + + // required .StorageInfoProto storageInfo = 4; + boolean hasStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder(); + } + public static final class NamespaceInfoProto extends + com.google.protobuf.GeneratedMessage + implements NamespaceInfoProtoOrBuilder { + // Use NamespaceInfoProto.newBuilder() to construct. 
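A minimal sketch of how the repeated logs field handled by the RemoteEditLogManifestProto builder above is typically populated. The fields of RemoteEditLogProto itself are not part of this hunk, so buildPartial() is used instead of build() to sidestep required-field validation; the class and variable names below are illustrative only.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;

public class RemoteEditLogManifestSketch {
  public static void main(String[] args) {
    // RemoteEditLogProto carries its own required fields, defined elsewhere
    // in this patch; buildPartial() skips the isInitialized() check.
    RemoteEditLogProto log = RemoteEditLogProto.newBuilder().buildPartial();

    RemoteEditLogManifestProto manifest = RemoteEditLogManifestProto.newBuilder()
        .addLogs(log)          // repeated .RemoteEditLogProto logs = 1
        .buildPartial();       // build() would additionally verify every element

    System.out.println(manifest.getLogsCount());   // prints 1
  }
}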
+ private NamespaceInfoProto(Builder builder) { + super(builder); + } + private NamespaceInfoProto(boolean noInit) {} + + private static final NamespaceInfoProto defaultInstance; + public static NamespaceInfoProto getDefaultInstance() { + return defaultInstance; + } + + public NamespaceInfoProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_fieldAccessorTable; + } + + private int bitField0_; + // required string buildVersion = 1; + public static final int BUILDVERSION_FIELD_NUMBER = 1; + private java.lang.Object buildVersion_; + public boolean hasBuildVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getBuildVersion() { + java.lang.Object ref = buildVersion_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + buildVersion_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getBuildVersionBytes() { + java.lang.Object ref = buildVersion_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + buildVersion_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 distUpgradeVersion = 2; + public static final int DISTUPGRADEVERSION_FIELD_NUMBER = 2; + private int distUpgradeVersion_; + public boolean hasDistUpgradeVersion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getDistUpgradeVersion() { + return distUpgradeVersion_; + } + + // required string blockPoolID = 3; + public static final int BLOCKPOOLID_FIELD_NUMBER = 3; + private java.lang.Object blockPoolID_; + public boolean hasBlockPoolID() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getBlockPoolID() { + java.lang.Object ref = blockPoolID_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + blockPoolID_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getBlockPoolIDBytes() { + java.lang.Object ref = blockPoolID_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + blockPoolID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .StorageInfoProto storageInfo = 4; + public static final int STORAGEINFO_FIELD_NUMBER = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + return storageInfo_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + return storageInfo_; + } + + private void initFields() { + buildVersion_ = ""; + 
distUpgradeVersion_ = 0; + blockPoolID_ = ""; + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBuildVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDistUpgradeVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBlockPoolID()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStorageInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getStorageInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBuildVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, distUpgradeVersion_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getBlockPoolIDBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, storageInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBuildVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, distUpgradeVersion_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getBlockPoolIDBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, storageInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) obj; + + boolean result = true; + result = result && (hasBuildVersion() == other.hasBuildVersion()); + if (hasBuildVersion()) { + result = result && getBuildVersion() + .equals(other.getBuildVersion()); + } + result = result && (hasDistUpgradeVersion() == other.hasDistUpgradeVersion()); + if (hasDistUpgradeVersion()) { + result = result && (getDistUpgradeVersion() + == other.getDistUpgradeVersion()); + } + result = result && (hasBlockPoolID() == other.hasBlockPoolID()); + if (hasBlockPoolID()) { + result = result && getBlockPoolID() + .equals(other.getBlockPoolID()); + } + result = result && (hasStorageInfo() == other.hasStorageInfo()); + if (hasStorageInfo()) { + result = result && getStorageInfo() + .equals(other.getStorageInfo()); + } + 
result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBuildVersion()) { + hash = (37 * hash) + BUILDVERSION_FIELD_NUMBER; + hash = (53 * hash) + getBuildVersion().hashCode(); + } + if (hasDistUpgradeVersion()) { + hash = (37 * hash) + DISTUPGRADEVERSION_FIELD_NUMBER; + hash = (53 * hash) + getDistUpgradeVersion(); + } + if (hasBlockPoolID()) { + hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER; + hash = (53 * hash) + getBlockPoolID().hashCode(); + } + if (hasStorageInfo()) { + hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER; + hash = (53 * hash) + getStorageInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_NamespaceInfoProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStorageInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + buildVersion_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + distUpgradeVersion_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + blockPoolID_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto buildPartial() { 
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.buildVersion_ = buildVersion_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.distUpgradeVersion_ = distUpgradeVersion_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.blockPoolID_ = blockPoolID_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (storageInfoBuilder_ == null) { + result.storageInfo_ = storageInfo_; + } else { + result.storageInfo_ = storageInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) return this; + if (other.hasBuildVersion()) { + setBuildVersion(other.getBuildVersion()); + } + if (other.hasDistUpgradeVersion()) { + setDistUpgradeVersion(other.getDistUpgradeVersion()); + } + if (other.hasBlockPoolID()) { + setBlockPoolID(other.getBlockPoolID()); + } + if (other.hasStorageInfo()) { + mergeStorageInfo(other.getStorageInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBuildVersion()) { + + return false; + } + if (!hasDistUpgradeVersion()) { + + return false; + } + if (!hasBlockPoolID()) { + + return false; + } + if (!hasStorageInfo()) { + + return false; + } + if (!getStorageInfo().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + buildVersion_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + distUpgradeVersion_ = input.readUInt32(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + blockPoolID_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(); + if (hasStorageInfo()) { + subBuilder.mergeFrom(getStorageInfo()); + } + input.readMessage(subBuilder, extensionRegistry); + setStorageInfo(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required 
string buildVersion = 1; + private java.lang.Object buildVersion_ = ""; + public boolean hasBuildVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getBuildVersion() { + java.lang.Object ref = buildVersion_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + buildVersion_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setBuildVersion(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + buildVersion_ = value; + onChanged(); + return this; + } + public Builder clearBuildVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + buildVersion_ = getDefaultInstance().getBuildVersion(); + onChanged(); + return this; + } + void setBuildVersion(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + buildVersion_ = value; + onChanged(); + } + + // required uint32 distUpgradeVersion = 2; + private int distUpgradeVersion_ ; + public boolean hasDistUpgradeVersion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getDistUpgradeVersion() { + return distUpgradeVersion_; + } + public Builder setDistUpgradeVersion(int value) { + bitField0_ |= 0x00000002; + distUpgradeVersion_ = value; + onChanged(); + return this; + } + public Builder clearDistUpgradeVersion() { + bitField0_ = (bitField0_ & ~0x00000002); + distUpgradeVersion_ = 0; + onChanged(); + return this; + } + + // required string blockPoolID = 3; + private java.lang.Object blockPoolID_ = ""; + public boolean hasBlockPoolID() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getBlockPoolID() { + java.lang.Object ref = blockPoolID_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + blockPoolID_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setBlockPoolID(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + blockPoolID_ = value; + onChanged(); + return this; + } + public Builder clearBlockPoolID() { + bitField0_ = (bitField0_ & ~0x00000004); + blockPoolID_ = getDefaultInstance().getBlockPoolID(); + onChanged(); + return this; + } + void setBlockPoolID(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + blockPoolID_ = value; + onChanged(); + } + + // required .StorageInfoProto storageInfo = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_; + public boolean hasStorageInfo() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorageInfo() { + if (storageInfoBuilder_ == null) { + return storageInfo_; + } else { + return storageInfoBuilder_.getMessage(); + } + } + public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + storageInfo_ = value; + onChanged(); + } else { + storageInfoBuilder_.setMessage(value); + } + bitField0_ |= 
0x00000008; + return this; + } + public Builder setStorageInfo( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) { + if (storageInfoBuilder_ == null) { + storageInfo_ = builderForValue.build(); + onChanged(); + } else { + storageInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) { + if (storageInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) { + storageInfo_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storageInfo_).mergeFrom(value).buildPartial(); + } else { + storageInfo_ = value; + } + onChanged(); + } else { + storageInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder clearStorageInfo() { + if (storageInfoBuilder_ == null) { + storageInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance(); + onChanged(); + } else { + storageInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageInfoBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getStorageInfoFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() { + if (storageInfoBuilder_ != null) { + return storageInfoBuilder_.getMessageOrBuilder(); + } else { + return storageInfo_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> + getStorageInfoFieldBuilder() { + if (storageInfoBuilder_ == null) { + storageInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>( + storageInfo_, + getParentForChildren(), + isClean()); + storageInfo_ = null; + } + return storageInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:NamespaceInfoProto) + } + + static { + defaultInstance = new NamespaceInfoProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:NamespaceInfoProto) + } + + public interface BlockKeyProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 keyId = 1; + boolean hasKeyId(); + int getKeyId(); + + // required uint64 expiryDate = 2; + boolean hasExpiryDate(); + long getExpiryDate(); + + // required bytes keyBytes = 3; + boolean hasKeyBytes(); + com.google.protobuf.ByteString getKeyBytes(); + } + public static final class BlockKeyProto extends + com.google.protobuf.GeneratedMessage + implements BlockKeyProtoOrBuilder { + // Use BlockKeyProto.newBuilder() to construct. 
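A usage sketch for the NamespaceInfoProto builder generated above. All four fields are declared required, and build() throws if any is unset; because StorageInfoProto's own fields appear elsewhere in the patch, the nested message is only stubbed with buildPartial() here, and the literal values are placeholders.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;

public class NamespaceInfoSketch {
  public static void main(String[] args) {
    // StorageInfoProto has its own required fields (not shown in this hunk);
    // a real caller would set them before relying on isInitialized().
    StorageInfoProto storage = StorageInfoProto.newBuilder().buildPartial();

    NamespaceInfoProto info = NamespaceInfoProto.newBuilder()
        .setBuildVersion("placeholder-build-version")  // required string buildVersion = 1
        .setDistUpgradeVersion(0)                      // required uint32 distUpgradeVersion = 2
        .setBlockPoolID("placeholder-block-pool-id")   // required string blockPoolID = 3
        .setStorageInfo(storage)                       // required .StorageInfoProto storageInfo = 4
        .buildPartial();

    // isInitialized() also recurses into storageInfo, so it only returns true
    // once the nested message's required fields are populated as well.
    System.out.println(info.isInitialized());
  }
}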
+ private BlockKeyProto(Builder builder) { + super(builder); + } + private BlockKeyProto(boolean noInit) {} + + private static final BlockKeyProto defaultInstance; + public static BlockKeyProto getDefaultInstance() { + return defaultInstance; + } + + public BlockKeyProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint32 keyId = 1; + public static final int KEYID_FIELD_NUMBER = 1; + private int keyId_; + public boolean hasKeyId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getKeyId() { + return keyId_; + } + + // required uint64 expiryDate = 2; + public static final int EXPIRYDATE_FIELD_NUMBER = 2; + private long expiryDate_; + public boolean hasExpiryDate() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getExpiryDate() { + return expiryDate_; + } + + // required bytes keyBytes = 3; + public static final int KEYBYTES_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString keyBytes_; + public boolean hasKeyBytes() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getKeyBytes() { + return keyBytes_; + } + + private void initFields() { + keyId_ = 0; + expiryDate_ = 0L; + keyBytes_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKeyId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasExpiryDate()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasKeyBytes()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, keyId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, expiryDate_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, keyBytes_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, keyId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, expiryDate_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, keyBytes_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if 
(obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) obj; + + boolean result = true; + result = result && (hasKeyId() == other.hasKeyId()); + if (hasKeyId()) { + result = result && (getKeyId() + == other.getKeyId()); + } + result = result && (hasExpiryDate() == other.hasExpiryDate()); + if (hasExpiryDate()) { + result = result && (getExpiryDate() + == other.getExpiryDate()); + } + result = result && (hasKeyBytes() == other.hasKeyBytes()); + if (hasKeyBytes()) { + result = result && getKeyBytes() + .equals(other.getKeyBytes()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasKeyId()) { + hash = (37 * hash) + KEYID_FIELD_NUMBER; + hash = (53 * hash) + getKeyId(); + } + if (hasExpiryDate()) { + hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getExpiryDate()); + } + if (hasKeyBytes()) { + hash = (37 * hash) + KEYBYTES_FIELD_NUMBER; + hash = (53 * hash) + getKeyBytes().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_BlockKeyProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + keyId_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + expiryDate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + keyBytes_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.keyId_ = keyId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.expiryDate_ = expiryDate_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.keyBytes_ = keyBytes_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) return this; + if (other.hasKeyId()) { + setKeyId(other.getKeyId()); + } + if (other.hasExpiryDate()) { + setExpiryDate(other.getExpiryDate()); + } + if (other.hasKeyBytes()) { + setKeyBytes(other.getKeyBytes()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKeyId()) { + + return false; + } + if (!hasExpiryDate()) { + + return false; + } + if (!hasKeyBytes()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + keyId_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + expiryDate_ = input.readUInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + keyBytes_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required uint32 keyId = 1; + private int keyId_ ; + public boolean hasKeyId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getKeyId() { + return keyId_; + } + public Builder setKeyId(int value) { + bitField0_ |= 0x00000001; + keyId_ = value; + onChanged(); + return this; + } + public Builder clearKeyId() { + bitField0_ = (bitField0_ & ~0x00000001); + keyId_ = 0; + onChanged(); + return this; + } + + // required uint64 expiryDate = 2; + private long expiryDate_ ; + public boolean hasExpiryDate() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getExpiryDate() { + return expiryDate_; + } + public Builder setExpiryDate(long value) { + bitField0_ |= 0x00000002; + 
expiryDate_ = value; + onChanged(); + return this; + } + public Builder clearExpiryDate() { + bitField0_ = (bitField0_ & ~0x00000002); + expiryDate_ = 0L; + onChanged(); + return this; + } + + // required bytes keyBytes = 3; + private com.google.protobuf.ByteString keyBytes_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasKeyBytes() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getKeyBytes() { + return keyBytes_; + } + public Builder setKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + keyBytes_ = value; + onChanged(); + return this; + } + public Builder clearKeyBytes() { + bitField0_ = (bitField0_ & ~0x00000004); + keyBytes_ = getDefaultInstance().getKeyBytes(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:BlockKeyProto) + } + + static { + defaultInstance = new BlockKeyProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BlockKeyProto) + } + + public interface ExportedBlockKeysProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool isBlockTokenEnabled = 1; + boolean hasIsBlockTokenEnabled(); + boolean getIsBlockTokenEnabled(); + + // required uint64 keyUpdateInterval = 2; + boolean hasKeyUpdateInterval(); + long getKeyUpdateInterval(); + + // required uint64 tokenLifeTime = 3; + boolean hasTokenLifeTime(); + long getTokenLifeTime(); + + // required .BlockKeyProto currentKey = 4; + boolean hasCurrentKey(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder(); + + // repeated .BlockKeyProto allKeys = 5; + java.util.List + getAllKeysList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index); + int getAllKeysCount(); + java.util.List + getAllKeysOrBuilderList(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( + int index); + } + public static final class ExportedBlockKeysProto extends + com.google.protobuf.GeneratedMessage + implements ExportedBlockKeysProtoOrBuilder { + // Use ExportedBlockKeysProto.newBuilder() to construct. 
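The BlockKeyProto message above is self-contained in this hunk (keyId, expiryDate, and keyBytes are all required scalar or bytes fields), so a complete build-and-parse round trip can be sketched; the values used are illustrative.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;

public class BlockKeySketch {
  public static void main(String[] args) throws Exception {
    BlockKeyProto key = BlockKeyProto.newBuilder()
        .setKeyId(1)                                              // required uint32 keyId = 1
        .setExpiryDate(System.currentTimeMillis())                // required uint64 expiryDate = 2
        .setKeyBytes(ByteString.copyFrom(new byte[] {1, 2, 3}))   // required bytes keyBytes = 3
        .build();                                                 // all required fields set, so this succeeds

    // parseFrom(byte[]) is one of the generated parse entry points above.
    BlockKeyProto parsed = BlockKeyProto.parseFrom(key.toByteArray());
    System.out.println(parsed.getKeyId() + " " + parsed.hasKeyBytes());  // 1 true
  }
}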
+ private ExportedBlockKeysProto(Builder builder) { + super(builder); + } + private ExportedBlockKeysProto(boolean noInit) {} + + private static final ExportedBlockKeysProto defaultInstance; + public static ExportedBlockKeysProto getDefaultInstance() { + return defaultInstance; + } + + public ExportedBlockKeysProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_fieldAccessorTable; + } + + private int bitField0_; + // required bool isBlockTokenEnabled = 1; + public static final int ISBLOCKTOKENENABLED_FIELD_NUMBER = 1; + private boolean isBlockTokenEnabled_; + public boolean hasIsBlockTokenEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getIsBlockTokenEnabled() { + return isBlockTokenEnabled_; + } + + // required uint64 keyUpdateInterval = 2; + public static final int KEYUPDATEINTERVAL_FIELD_NUMBER = 2; + private long keyUpdateInterval_; + public boolean hasKeyUpdateInterval() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getKeyUpdateInterval() { + return keyUpdateInterval_; + } + + // required uint64 tokenLifeTime = 3; + public static final int TOKENLIFETIME_FIELD_NUMBER = 3; + private long tokenLifeTime_; + public boolean hasTokenLifeTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getTokenLifeTime() { + return tokenLifeTime_; + } + + // required .BlockKeyProto currentKey = 4; + public static final int CURRENTKEY_FIELD_NUMBER = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_; + public boolean hasCurrentKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { + return currentKey_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { + return currentKey_; + } + + // repeated .BlockKeyProto allKeys = 5; + public static final int ALLKEYS_FIELD_NUMBER = 5; + private java.util.List allKeys_; + public java.util.List getAllKeysList() { + return allKeys_; + } + public java.util.List + getAllKeysOrBuilderList() { + return allKeys_; + } + public int getAllKeysCount() { + return allKeys_.size(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { + return allKeys_.get(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( + int index) { + return allKeys_.get(index); + } + + private void initFields() { + isBlockTokenEnabled_ = false; + keyUpdateInterval_ = 0L; + tokenLifeTime_ = 0L; + currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); + allKeys_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIsBlockTokenEnabled()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasKeyUpdateInterval()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTokenLifeTime()) 
{ + memoizedIsInitialized = 0; + return false; + } + if (!hasCurrentKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCurrentKey().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getAllKeysCount(); i++) { + if (!getAllKeys(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, isBlockTokenEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, keyUpdateInterval_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, tokenLifeTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, currentKey_); + } + for (int i = 0; i < allKeys_.size(); i++) { + output.writeMessage(5, allKeys_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, isBlockTokenEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, keyUpdateInterval_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, tokenLifeTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, currentKey_); + } + for (int i = 0; i < allKeys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, allKeys_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) obj; + + boolean result = true; + result = result && (hasIsBlockTokenEnabled() == other.hasIsBlockTokenEnabled()); + if (hasIsBlockTokenEnabled()) { + result = result && (getIsBlockTokenEnabled() + == other.getIsBlockTokenEnabled()); + } + result = result && (hasKeyUpdateInterval() == other.hasKeyUpdateInterval()); + if (hasKeyUpdateInterval()) { + result = result && (getKeyUpdateInterval() + == other.getKeyUpdateInterval()); + } + result = result && (hasTokenLifeTime() == other.hasTokenLifeTime()); + if (hasTokenLifeTime()) { + result = result && (getTokenLifeTime() + == other.getTokenLifeTime()); + } + result = result && (hasCurrentKey() == other.hasCurrentKey()); + if (hasCurrentKey()) { + result = result && getCurrentKey() + .equals(other.getCurrentKey()); + } + result = result && getAllKeysList() + .equals(other.getAllKeysList()); + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasIsBlockTokenEnabled()) { + hash = (37 * hash) + ISBLOCKTOKENENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsBlockTokenEnabled()); + } + if (hasKeyUpdateInterval()) { + hash = (37 * hash) + KEYUPDATEINTERVAL_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getKeyUpdateInterval()); + } + if (hasTokenLifeTime()) { + hash = (37 * hash) + TOKENLIFETIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTokenLifeTime()); + } + if (hasCurrentKey()) { + hash = (37 * hash) + CURRENTKEY_FIELD_NUMBER; + hash = (53 * hash) + getCurrentKey().hashCode(); + } + if (getAllKeysCount() > 0) { + hash = (37 * hash) + ALLKEYS_FIELD_NUMBER; + hash = (53 * hash) + getAllKeysList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ExportedBlockKeysProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCurrentKeyFieldBuilder(); + getAllKeysFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + isBlockTokenEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + keyUpdateInterval_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + tokenLifeTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + if (currentKeyBuilder_ == null) { + currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); + } else { + currentKeyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + if (allKeysBuilder_ == null) { + allKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + allKeysBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.isBlockTokenEnabled_ = isBlockTokenEnabled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.keyUpdateInterval_ = keyUpdateInterval_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.tokenLifeTime_ = tokenLifeTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (currentKeyBuilder_ == null) { + result.currentKey_ = currentKey_; + } else { + result.currentKey_ = currentKeyBuilder_.build(); + } + if (allKeysBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + allKeys_ = java.util.Collections.unmodifiableList(allKeys_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.allKeys_ = allKeys_; + } else { + result.allKeys_ = allKeysBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) return this; + if (other.hasIsBlockTokenEnabled()) { + setIsBlockTokenEnabled(other.getIsBlockTokenEnabled()); + } + if (other.hasKeyUpdateInterval()) { + setKeyUpdateInterval(other.getKeyUpdateInterval()); + } + if (other.hasTokenLifeTime()) { + setTokenLifeTime(other.getTokenLifeTime()); + } + if (other.hasCurrentKey()) { + mergeCurrentKey(other.getCurrentKey()); + } + if (allKeysBuilder_ == null) { + if (!other.allKeys_.isEmpty()) { + if (allKeys_.isEmpty()) { + allKeys_ = other.allKeys_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureAllKeysIsMutable(); + allKeys_.addAll(other.allKeys_); + } + onChanged(); + } + } else { + if (!other.allKeys_.isEmpty()) { + if (allKeysBuilder_.isEmpty()) { + allKeysBuilder_.dispose(); + allKeysBuilder_ = null; + allKeys_ = other.allKeys_; + bitField0_ = (bitField0_ & ~0x00000010); + allKeysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getAllKeysFieldBuilder() : null; + } else { + allKeysBuilder_.addAllMessages(other.allKeys_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIsBlockTokenEnabled()) { + + return false; + } + if (!hasKeyUpdateInterval()) { + + return false; + } + if (!hasTokenLifeTime()) { + + return false; + } + if (!hasCurrentKey()) { + + return false; + } + if (!getCurrentKey().isInitialized()) { + + return false; + } + for (int i = 0; i < getAllKeysCount(); i++) { + if (!getAllKeys(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + isBlockTokenEnabled_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + keyUpdateInterval_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + tokenLifeTime_ = input.readUInt64(); + break; + } + case 34: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(); + if (hasCurrentKey()) { + subBuilder.mergeFrom(getCurrentKey()); + } + input.readMessage(subBuilder, extensionRegistry); + setCurrentKey(subBuilder.buildPartial()); + break; + } + case 42: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addAllKeys(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required bool isBlockTokenEnabled = 1; + private boolean isBlockTokenEnabled_ ; + public boolean hasIsBlockTokenEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getIsBlockTokenEnabled() { + return isBlockTokenEnabled_; + } + public Builder setIsBlockTokenEnabled(boolean value) { + bitField0_ |= 0x00000001; + isBlockTokenEnabled_ = value; + onChanged(); + return this; + } + public Builder clearIsBlockTokenEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + isBlockTokenEnabled_ = false; + onChanged(); + return this; + } + + // required uint64 keyUpdateInterval = 2; + private long keyUpdateInterval_ ; + public boolean hasKeyUpdateInterval() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getKeyUpdateInterval() { + return keyUpdateInterval_; + } + public Builder setKeyUpdateInterval(long value) { + bitField0_ |= 0x00000002; + keyUpdateInterval_ = value; + onChanged(); + return this; + } + public Builder clearKeyUpdateInterval() { + bitField0_ = (bitField0_ & ~0x00000002); + keyUpdateInterval_ = 0L; + onChanged(); + return this; + } + + // required uint64 tokenLifeTime = 3; + private long tokenLifeTime_ ; + public boolean hasTokenLifeTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long 
getTokenLifeTime() { + return tokenLifeTime_; + } + public Builder setTokenLifeTime(long value) { + bitField0_ |= 0x00000004; + tokenLifeTime_ = value; + onChanged(); + return this; + } + public Builder clearTokenLifeTime() { + bitField0_ = (bitField0_ & ~0x00000004); + tokenLifeTime_ = 0L; + onChanged(); + return this; + } + + // required .BlockKeyProto currentKey = 4; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> currentKeyBuilder_; + public boolean hasCurrentKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getCurrentKey() { + if (currentKeyBuilder_ == null) { + return currentKey_; + } else { + return currentKeyBuilder_.getMessage(); + } + } + public Builder setCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { + if (currentKeyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + currentKey_ = value; + onChanged(); + } else { + currentKeyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder setCurrentKey( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { + if (currentKeyBuilder_ == null) { + currentKey_ = builderForValue.build(); + onChanged(); + } else { + currentKeyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder mergeCurrentKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { + if (currentKeyBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + currentKey_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()) { + currentKey_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.newBuilder(currentKey_).mergeFrom(value).buildPartial(); + } else { + currentKey_ = value; + } + onChanged(); + } else { + currentKeyBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + public Builder clearCurrentKey() { + if (currentKeyBuilder_ == null) { + currentKey_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance(); + onChanged(); + } else { + currentKeyBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getCurrentKeyBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getCurrentKeyFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getCurrentKeyOrBuilder() { + if (currentKeyBuilder_ != null) { + return currentKeyBuilder_.getMessageOrBuilder(); + } else { + return currentKey_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> + getCurrentKeyFieldBuilder() { + if (currentKeyBuilder_ == null) { + currentKeyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( + currentKey_, + getParentForChildren(), + isClean()); + currentKey_ = null; + } + return currentKeyBuilder_; + } + + // repeated .BlockKeyProto allKeys = 5; + private java.util.List allKeys_ = + java.util.Collections.emptyList(); + private void ensureAllKeysIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + allKeys_ = new java.util.ArrayList(allKeys_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> allKeysBuilder_; + + public java.util.List getAllKeysList() { + if (allKeysBuilder_ == null) { + return java.util.Collections.unmodifiableList(allKeys_); + } else { + return allKeysBuilder_.getMessageList(); + } + } + public int getAllKeysCount() { + if (allKeysBuilder_ == null) { + return allKeys_.size(); + } else { + return allKeysBuilder_.getCount(); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto getAllKeys(int index) { + if (allKeysBuilder_ == null) { + return allKeys_.get(index); + } else { + return allKeysBuilder_.getMessage(index); + } + } + public Builder setAllKeys( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { + if (allKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllKeysIsMutable(); + allKeys_.set(index, value); + onChanged(); + } else { + allKeysBuilder_.setMessage(index, value); + } + return this; + } + public Builder setAllKeys( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { + if (allKeysBuilder_ == null) { + ensureAllKeysIsMutable(); + allKeys_.set(index, builderForValue.build()); + onChanged(); + } else { + allKeysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { + if (allKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllKeysIsMutable(); + allKeys_.add(value); + onChanged(); + } else { + allKeysBuilder_.addMessage(value); + } + return this; + } + public Builder addAllKeys( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto value) { + if (allKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAllKeysIsMutable(); + allKeys_.add(index, value); + onChanged(); + } else { + allKeysBuilder_.addMessage(index, value); + } + return this; + } + public Builder addAllKeys( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { + if (allKeysBuilder_ == null) { + ensureAllKeysIsMutable(); + allKeys_.add(builderForValue.build()); + onChanged(); + } else { + allKeysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addAllKeys( + int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder builderForValue) { + if (allKeysBuilder_ == null) { + ensureAllKeysIsMutable(); + allKeys_.add(index, builderForValue.build()); + onChanged(); + } else { + allKeysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + 
} + public Builder addAllAllKeys( + java.lang.Iterable values) { + if (allKeysBuilder_ == null) { + ensureAllKeysIsMutable(); + super.addAll(values, allKeys_); + onChanged(); + } else { + allKeysBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearAllKeys() { + if (allKeysBuilder_ == null) { + allKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + allKeysBuilder_.clear(); + } + return this; + } + public Builder removeAllKeys(int index) { + if (allKeysBuilder_ == null) { + ensureAllKeysIsMutable(); + allKeys_.remove(index); + onChanged(); + } else { + allKeysBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder getAllKeysBuilder( + int index) { + return getAllKeysFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder getAllKeysOrBuilder( + int index) { + if (allKeysBuilder_ == null) { + return allKeys_.get(index); } else { + return allKeysBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getAllKeysOrBuilderList() { + if (allKeysBuilder_ != null) { + return allKeysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(allKeys_); + } + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder() { + return getAllKeysFieldBuilder().addBuilder( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder addAllKeysBuilder( + int index) { + return getAllKeysFieldBuilder().addBuilder( + index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.getDefaultInstance()); + } + public java.util.List + getAllKeysBuilderList() { + return getAllKeysFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder> + getAllKeysFieldBuilder() { + if (allKeysBuilder_ == null) { + allKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProtoOrBuilder>( + allKeys_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + allKeys_ = null; + } + return allKeysBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ExportedBlockKeysProto) + } + + static { + defaultInstance = new ExportedBlockKeysProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ExportedBlockKeysProto) + } + + public interface RecoveringBlockProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 newGenStamp = 1; + boolean hasNewGenStamp(); + long getNewGenStamp(); + + // required .LocatedBlockProto block = 2; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder(); + } + public static final class RecoveringBlockProto extends + com.google.protobuf.GeneratedMessage + implements RecoveringBlockProtoOrBuilder { + // Use 
RecoveringBlockProto.newBuilder() to construct. + private RecoveringBlockProto(Builder builder) { + super(builder); + } + private RecoveringBlockProto(boolean noInit) {} + + private static final RecoveringBlockProto defaultInstance; + public static RecoveringBlockProto getDefaultInstance() { + return defaultInstance; + } + + public RecoveringBlockProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 newGenStamp = 1; + public static final int NEWGENSTAMP_FIELD_NUMBER = 1; + private long newGenStamp_; + public boolean hasNewGenStamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getNewGenStamp() { + return newGenStamp_; + } + + // required .LocatedBlockProto block = 2; + public static final int BLOCK_FIELD_NUMBER = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + private void initFields() { + newGenStamp_ = 0L; + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNewGenStamp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, newGenStamp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, block_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, newGenStamp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, block_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) obj; + + boolean result = true; + result = result && (hasNewGenStamp() == other.hasNewGenStamp()); + if (hasNewGenStamp()) { + result = result && (getNewGenStamp() + == other.getNewGenStamp()); + } + result = result && (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNewGenStamp()) { + hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNewGenStamp()); + } + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_RecoveringBlockProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + newGenStamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto build() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.newGenStamp_ = newGenStamp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) return this; + if (other.hasNewGenStamp()) { + setNewGenStamp(other.getNewGenStamp()); + } + if (other.hasBlock()) { + mergeBlock(other.getBlock()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNewGenStamp()) { + + return false; + } + if (!hasBlock()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + newGenStamp_ = input.readUInt64(); + break; + } + case 18: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 newGenStamp = 1; + private long newGenStamp_ ; + public boolean hasNewGenStamp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getNewGenStamp() { + return newGenStamp_; + } + public Builder setNewGenStamp(long value) { + bitField0_ |= 0x00000001; + newGenStamp_ = value; + onChanged(); + return this; + } + public Builder clearNewGenStamp() { + bitField0_ = (bitField0_ & ~0x00000001); + newGenStamp_ = 0L; + onChanged(); + return this; + } + + // required .LocatedBlockProto block = 2; + private 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) { + block_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; + } + return blockBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RecoveringBlockProto) + } + + static { + defaultInstance = new RecoveringBlockProto(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:RecoveringBlockProto) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_ExtendedBlockProto_descriptor; private static @@ -10283,6 +20145,11 @@ public Builder clearUpgradeStatus() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_DatanodeIDProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DatanodeIDsProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DatanodeIDsProto_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_DatanodeInfoProto_descriptor; private static @@ -10333,6 +20200,76 @@ public Builder clearUpgradeStatus() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UpgradeStatusReportProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StorageInfoProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StorageInfoProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_NamenodeRegistrationProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_NamenodeRegistrationProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CheckpointSignatureProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CheckpointSignatureProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_NamenodeCommandProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_NamenodeCommandProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CheckpointCommandProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CheckpointCommandProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BlockProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BlockProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BlockWithLocationsProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BlockWithLocationsProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BlocksWithLocationsProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BlocksWithLocationsProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoteEditLogProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoteEditLogProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoteEditLogManifestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoteEditLogManifestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_NamespaceInfoProto_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_NamespaceInfoProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BlockKeyProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BlockKeyProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ExportedBlockKeysProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ExportedBlockKeysProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RecoveringBlockProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RecoveringBlockProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -10349,48 +20286,91 @@ public Builder clearUpgradeStatus() { "\020\n\010password\030\002 \002(\014\022\014\n\004kind\030\003 \002(\t\022\017\n\007servi" + "ce\030\004 \002(\t\"U\n\017DatanodeIDProto\022\014\n\004name\030\001 \002(" + "\t\022\021\n\tstorageID\030\002 \002(\t\022\020\n\010infoPort\030\003 \002(\r\022\017" + - "\n\007ipcPort\030\004 \002(\r\"\312\002\n\021DatanodeInfoProto\022\034\n" + - "\002id\030\001 \002(\0132\020.DatanodeIDProto\022\020\n\010capacity\030" + - "\002 \001(\004\022\017\n\007dfsUsed\030\003 \001(\004\022\021\n\tremaining\030\004 \001(", - "\004\022\025\n\rblockPoolUsed\030\005 \001(\004\022\022\n\nlastUpdate\030\006" + - " \001(\004\022\024\n\014xceiverCount\030\007 \001(\r\022\020\n\010location\030\010" + - " \001(\t\022\020\n\010hostName\030\t \001(\t\0221\n\nadminState\030\n \001" + - "(\0162\035.DatanodeInfoProto.AdminState\"I\n\nAdm" + - "inState\022\n\n\006NORMAL\020\000\022\033\n\027DECOMMISSION_INPR" + - "OGRESS\020\001\022\022\n\016DECOMMISSIONED\020\002\"\212\001\n\023Content" + - "SummaryProto\022\016\n\006length\030\001 \002(\004\022\021\n\tfileCoun" + - "t\030\002 \002(\004\022\026\n\016directoryCount\030\003 \002(\004\022\r\n\005quota" + - "\030\004 \002(\004\022\025\n\rspaceConsumed\030\005 \002(\004\022\022\n\nspaceQu" + - "ota\030\006 \002(\004\"7\n\026CorruptFileBlocksProto\022\r\n\005f", - "iles\030\001 \003(\t\022\016\n\006cookie\030\002 \002(\t\"!\n\021FsPermissi" + - "onProto\022\014\n\004perm\030\001 \002(\r\"\246\001\n\021LocatedBlockPr" + - "oto\022\036\n\001b\030\001 \002(\0132\023.ExtendedBlockProto\022\016\n\006o" + - "ffset\030\002 \002(\004\022 \n\004locs\030\003 \003(\0132\022.DatanodeInfo" + - "Proto\022\017\n\007corrupt\030\004 \002(\010\022.\n\nblockToken\030\005 \002" + - "(\0132\032.BlockTokenIdentifierProto\"\253\001\n\022Locat" + - "edBlocksProto\022\022\n\nfileLength\030\001 \002(\004\022\"\n\006blo" + - "cks\030\002 \003(\0132\022.LocatedBlockProto\022\031\n\021underCo" + - "nstruction\030\003 \002(\010\022%\n\tlastBlock\030\004 \001(\0132\022.Lo" + - "catedBlockProto\022\033\n\023isLastBlockComplete\030\005", - " \002(\010\"\366\002\n\023HdfsFileStatusProto\022/\n\010fileType" + - "\030\001 \002(\0162\035.HdfsFileStatusProto.FileType\022\014\n" + - "\004path\030\002 \002(\014\022\016\n\006length\030\003 \002(\004\022&\n\npermissio" + - "n\030\004 \002(\0132\022.FsPermissionProto\022\r\n\005owner\030\005 \002" + - "(\t\022\r\n\005group\030\006 \002(\t\022\031\n\021modification_time\030\007" + - " \002(\004\022\023\n\013access_time\030\010 \002(\004\022\017\n\007symlink\030\t \001" + - 
"(\014\022\031\n\021block_replication\030\n \001(\r\022\021\n\tblocksi" + - "ze\030\013 \001(\004\022&\n\tlocations\030\014 \001(\0132\023.LocatedBlo" + - "cksProto\"3\n\010FileType\022\n\n\006IS_DIR\020\001\022\013\n\007IS_F" + - "ILE\020\002\022\016\n\nIS_SYMLINK\020\003\"\212\001\n\025FsServerDefaul", - "tsProto\022\021\n\tblockSize\030\001 \002(\004\022\030\n\020bytesPerCh" + - "ecksum\030\002 \002(\r\022\027\n\017writePacketSize\030\003 \002(\r\022\023\n" + - "\013replication\030\004 \002(\r\022\026\n\016fileBufferSize\030\005 \002" + - "(\r\"_\n\025DirectoryListingProto\022,\n\016partialLi" + - "sting\030\001 \003(\0132\024.HdfsFileStatusProto\022\030\n\020rem" + - "ainingEntries\030\002 \002(\r\"B\n\030UpgradeStatusRepo" + - "rtProto\022\017\n\007version\030\001 \002(\r\022\025\n\rupgradeStatu" + - "s\030\002 \002(\rB6\n%org.apache.hadoop.hdfs.protoc" + - "ol.protoB\nHdfsProtos\240\001\001" + "\n\007ipcPort\030\004 \002(\r\"7\n\020DatanodeIDsProto\022#\n\td" + + "atanodes\030\001 \003(\0132\020.DatanodeIDProto\"\312\002\n\021Dat" + + "anodeInfoProto\022\034\n\002id\030\001 \002(\0132\020.DatanodeIDP", + "roto\022\020\n\010capacity\030\002 \001(\004\022\017\n\007dfsUsed\030\003 \001(\004\022" + + "\021\n\tremaining\030\004 \001(\004\022\025\n\rblockPoolUsed\030\005 \001(" + + "\004\022\022\n\nlastUpdate\030\006 \001(\004\022\024\n\014xceiverCount\030\007 " + + "\001(\r\022\020\n\010location\030\010 \001(\t\022\020\n\010hostName\030\t \001(\t\022" + + "1\n\nadminState\030\n \001(\0162\035.DatanodeInfoProto." + + "AdminState\"I\n\nAdminState\022\n\n\006NORMAL\020\000\022\033\n\027" + + "DECOMMISSION_INPROGRESS\020\001\022\022\n\016DECOMMISSIO" + + "NED\020\002\"\212\001\n\023ContentSummaryProto\022\016\n\006length\030" + + "\001 \002(\004\022\021\n\tfileCount\030\002 \002(\004\022\026\n\016directoryCou" + + "nt\030\003 \002(\004\022\r\n\005quota\030\004 \002(\004\022\025\n\rspaceConsumed", + "\030\005 \002(\004\022\022\n\nspaceQuota\030\006 \002(\004\"7\n\026CorruptFil" + + "eBlocksProto\022\r\n\005files\030\001 \003(\t\022\016\n\006cookie\030\002 " + + "\002(\t\"!\n\021FsPermissionProto\022\014\n\004perm\030\001 \002(\r\"\246" + + "\001\n\021LocatedBlockProto\022\036\n\001b\030\001 \002(\0132\023.Extend" + + "edBlockProto\022\016\n\006offset\030\002 \002(\004\022 \n\004locs\030\003 \003" + + "(\0132\022.DatanodeInfoProto\022\017\n\007corrupt\030\004 \002(\010\022" + + ".\n\nblockToken\030\005 \002(\0132\032.BlockTokenIdentifi" + + "erProto\"\253\001\n\022LocatedBlocksProto\022\022\n\nfileLe" + + "ngth\030\001 \002(\004\022\"\n\006blocks\030\002 \003(\0132\022.LocatedBloc" + + "kProto\022\031\n\021underConstruction\030\003 \002(\010\022%\n\tlas", + "tBlock\030\004 \001(\0132\022.LocatedBlockProto\022\033\n\023isLa" + + "stBlockComplete\030\005 \002(\010\"\366\002\n\023HdfsFileStatus" + + "Proto\022/\n\010fileType\030\001 \002(\0162\035.HdfsFileStatus" + + "Proto.FileType\022\014\n\004path\030\002 \002(\014\022\016\n\006length\030\003" + + " \002(\004\022&\n\npermission\030\004 \002(\0132\022.FsPermissionP" + + "roto\022\r\n\005owner\030\005 \002(\t\022\r\n\005group\030\006 \002(\t\022\031\n\021mo" + + "dification_time\030\007 \002(\004\022\023\n\013access_time\030\010 \002" + + "(\004\022\017\n\007symlink\030\t \001(\014\022\031\n\021block_replication" + + "\030\n \001(\r\022\021\n\tblocksize\030\013 \001(\004\022&\n\tlocations\030\014" + + " \001(\0132\023.LocatedBlocksProto\"3\n\010FileType\022\n\n", + 
"\006IS_DIR\020\001\022\013\n\007IS_FILE\020\002\022\016\n\nIS_SYMLINK\020\003\"\212" + + "\001\n\025FsServerDefaultsProto\022\021\n\tblockSize\030\001 " + + "\002(\004\022\030\n\020bytesPerChecksum\030\002 \002(\r\022\027\n\017writePa" + + "cketSize\030\003 \002(\r\022\023\n\013replication\030\004 \002(\r\022\026\n\016f" + + "ileBufferSize\030\005 \002(\r\"_\n\025DirectoryListingP" + + "roto\022,\n\016partialListing\030\001 \003(\0132\024.HdfsFileS" + + "tatusProto\022\030\n\020remainingEntries\030\002 \002(\r\"B\n\030" + + "UpgradeStatusReportProto\022\017\n\007version\030\001 \002(" + + "\r\022\025\n\rupgradeStatus\030\002 \002(\r\"_\n\020StorageInfoP" + + "roto\022\025\n\rlayoutVersion\030\001 \002(\r\022\022\n\nnamespceI", + "D\030\002 \002(\r\022\021\n\tclusterID\030\003 \002(\t\022\r\n\005cTime\030\004 \002(" + + "\004\"\347\001\n\031NamenodeRegistrationProto\022\022\n\nrpcAd" + + "dress\030\001 \002(\t\022\023\n\013httpAddress\030\002 \002(\t\022&\n\013stor" + + "ageInfo\030\003 \002(\0132\021.StorageInfoProto\022:\n\004role" + + "\030\004 \001(\0162,.NamenodeRegistrationProto.Namen" + + "odeRoleProto\"=\n\021NamenodeRoleProto\022\014\n\010NAM" + + "ENODE\020\001\022\n\n\006BACKUP\020\002\022\016\n\nCHECKPOINT\020\003\"\221\001\n\030" + + "CheckpointSignatureProto\022\023\n\013blockPoolId\030" + + "\001 \002(\t\022 \n\030mostRecentCheckpointTxId\030\002 \002(\004\022" + + "\026\n\016curSegmentTxId\030\003 \002(\004\022&\n\013storageInfo\030\004", + " \002(\0132\021.StorageInfoProto\"\264\001\n\024NamenodeComm" + + "andProto\022\016\n\006action\030\001 \002(\r\022(\n\004type\030\002 \002(\0162\032" + + ".NamenodeCommandProto.Type\022.\n\rcheckpoint" + + "Cmd\030\003 \001(\0132\027.CheckpointCommandProto\"2\n\004Ty" + + "pe\022\023\n\017NamenodeCommand\020\000\022\025\n\021CheckPointCom" + + "mand\020\001\"a\n\026CheckpointCommandProto\022,\n\tsign" + + "ature\030\001 \002(\0132\031.CheckpointSignatureProto\022\031" + + "\n\021needToReturnImage\030\002 \002(\010\"A\n\nBlockProto\022" + + "\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\020\n\010nu" + + "mBytes\030\003 \001(\004\"\\\n\027BlockWithLocationsProto\022", + "\032\n\005block\030\001 \002(\0132\013.BlockProto\022%\n\013datanodeI" + + "Ds\030\002 \003(\0132\020.DatanodeIDProto\"D\n\030BlocksWith" + + "LocationsProto\022(\n\006blocks\030\001 \003(\0132\030.BlockWi" + + "thLocationsProto\"8\n\022RemoteEditLogProto\022\021" + + "\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\"?\n\032Re" + + "moteEditLogManifestProto\022!\n\004logs\030\001 \003(\0132\023" + + ".RemoteEditLogProto\"\203\001\n\022NamespaceInfoPro" + + "to\022\024\n\014buildVersion\030\001 \002(\t\022\032\n\022distUpgradeV" + + "ersion\030\002 \002(\r\022\023\n\013blockPoolID\030\003 \002(\t\022&\n\013sto" + + "rageInfo\030\004 \002(\0132\021.StorageInfoProto\"D\n\rBlo", + "ckKeyProto\022\r\n\005keyId\030\001 \002(\r\022\022\n\nexpiryDate\030" + + "\002 \002(\004\022\020\n\010keyBytes\030\003 \002(\014\"\254\001\n\026ExportedBloc" + + "kKeysProto\022\033\n\023isBlockTokenEnabled\030\001 \002(\010\022" + + "\031\n\021keyUpdateInterval\030\002 \002(\004\022\025\n\rtokenLifeT" + + "ime\030\003 \002(\004\022\"\n\ncurrentKey\030\004 \002(\0132\016.BlockKey" + + "Proto\022\037\n\007allKeys\030\005 \003(\0132\016.BlockKeyProto\"N" + + "\n\024RecoveringBlockProto\022\023\n\013newGenStamp\030\001 " + + "\002(\004\022!\n\005block\030\002 \002(\0132\022.LocatedBlockProto*G" + + 
"\n\014ReplicaState\022\r\n\tFINALIZED\020\000\022\007\n\003RBW\020\001\022\007" + + "\n\003RWR\020\002\022\007\n\003RUR\020\003\022\r\n\tTEMPORARY\020\004B6\n%org.a", + "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" + + "otos\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10421,8 +20401,16 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( new java.lang.String[] { "Name", "StorageID", "InfoPort", "IpcPort", }, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class); - internal_static_DatanodeInfoProto_descriptor = + internal_static_DatanodeIDsProto_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_DatanodeIDsProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DatanodeIDsProto_descriptor, + new java.lang.String[] { "Datanodes", }, + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.class, + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder.class); + internal_static_DatanodeInfoProto_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_DatanodeInfoProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DatanodeInfoProto_descriptor, @@ -10430,7 +20418,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class); internal_static_ContentSummaryProto_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_ContentSummaryProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ContentSummaryProto_descriptor, @@ -10438,7 +20426,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class); internal_static_CorruptFileBlocksProto_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_CorruptFileBlocksProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CorruptFileBlocksProto_descriptor, @@ -10446,7 +20434,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class); internal_static_FsPermissionProto_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(7); internal_static_FsPermissionProto_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FsPermissionProto_descriptor, @@ -10454,7 +20442,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class); internal_static_LocatedBlockProto_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); 
           internal_static_LocatedBlockProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_LocatedBlockProto_descriptor,
@@ -10462,7 +20450,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
           internal_static_LocatedBlocksProto_descriptor =
-            getDescriptor().getMessageTypes().get(8);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_LocatedBlocksProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_LocatedBlocksProto_descriptor,
@@ -10470,7 +20458,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
           internal_static_HdfsFileStatusProto_descriptor =
-            getDescriptor().getMessageTypes().get(9);
+            getDescriptor().getMessageTypes().get(10);
           internal_static_HdfsFileStatusProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_HdfsFileStatusProto_descriptor,
@@ -10478,7 +20466,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
           internal_static_FsServerDefaultsProto_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(11);
           internal_static_FsServerDefaultsProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_FsServerDefaultsProto_descriptor,
@@ -10486,7 +20474,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
           internal_static_DirectoryListingProto_descriptor =
-            getDescriptor().getMessageTypes().get(11);
+            getDescriptor().getMessageTypes().get(12);
           internal_static_DirectoryListingProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_DirectoryListingProto_descriptor,
@@ -10494,13 +20482,125 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
           internal_static_UpgradeStatusReportProto_descriptor =
-            getDescriptor().getMessageTypes().get(12);
+            getDescriptor().getMessageTypes().get(13);
           internal_static_UpgradeStatusReportProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_UpgradeStatusReportProto_descriptor,
               new java.lang.String[] { "Version", "UpgradeStatus", },
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.class,
               org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder.class);
+          internal_static_StorageInfoProto_descriptor =
+            getDescriptor().getMessageTypes().get(14);
+          internal_static_StorageInfoProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_StorageInfoProto_descriptor,
+              new java.lang.String[] { "LayoutVersion", "NamespceID", "ClusterID", "CTime", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder.class);
+          internal_static_NamenodeRegistrationProto_descriptor =
+            getDescriptor().getMessageTypes().get(15);
+          internal_static_NamenodeRegistrationProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_NamenodeRegistrationProto_descriptor,
+              new java.lang.String[] { "RpcAddress", "HttpAddress", "StorageInfo", "Role", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder.class);
+          internal_static_CheckpointSignatureProto_descriptor =
+            getDescriptor().getMessageTypes().get(16);
+          internal_static_CheckpointSignatureProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_CheckpointSignatureProto_descriptor,
+              new java.lang.String[] { "BlockPoolId", "MostRecentCheckpointTxId", "CurSegmentTxId", "StorageInfo", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder.class);
+          internal_static_NamenodeCommandProto_descriptor =
+            getDescriptor().getMessageTypes().get(17);
+          internal_static_NamenodeCommandProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_NamenodeCommandProto_descriptor,
+              new java.lang.String[] { "Action", "Type", "CheckpointCmd", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder.class);
+          internal_static_CheckpointCommandProto_descriptor =
+            getDescriptor().getMessageTypes().get(18);
+          internal_static_CheckpointCommandProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_CheckpointCommandProto_descriptor,
+              new java.lang.String[] { "Signature", "NeedToReturnImage", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto.Builder.class);
+          internal_static_BlockProto_descriptor =
+            getDescriptor().getMessageTypes().get(19);
+          internal_static_BlockProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_BlockProto_descriptor,
+              new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class);
+          internal_static_BlockWithLocationsProto_descriptor =
+            getDescriptor().getMessageTypes().get(20);
+          internal_static_BlockWithLocationsProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_BlockWithLocationsProto_descriptor,
+              new java.lang.String[] { "Block", "DatanodeIDs", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder.class);
+          internal_static_BlocksWithLocationsProto_descriptor =
+            getDescriptor().getMessageTypes().get(21);
+          internal_static_BlocksWithLocationsProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_BlocksWithLocationsProto_descriptor,
+              new java.lang.String[] { "Blocks", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto.Builder.class);
+          internal_static_RemoteEditLogProto_descriptor =
+            getDescriptor().getMessageTypes().get(22);
+          internal_static_RemoteEditLogProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RemoteEditLogProto_descriptor,
+              new java.lang.String[] { "StartTxId", "EndTxId", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto.Builder.class);
+          internal_static_RemoteEditLogManifestProto_descriptor =
+            getDescriptor().getMessageTypes().get(23);
+          internal_static_RemoteEditLogManifestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RemoteEditLogManifestProto_descriptor,
+              new java.lang.String[] { "Logs", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder.class);
+          internal_static_NamespaceInfoProto_descriptor =
+            getDescriptor().getMessageTypes().get(24);
+          internal_static_NamespaceInfoProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_NamespaceInfoProto_descriptor,
+              new java.lang.String[] { "BuildVersion", "DistUpgradeVersion", "BlockPoolID", "StorageInfo", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder.class);
+          internal_static_BlockKeyProto_descriptor =
+            getDescriptor().getMessageTypes().get(25);
+          internal_static_BlockKeyProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_BlockKeyProto_descriptor,
+              new java.lang.String[] { "KeyId", "ExpiryDate", "KeyBytes", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto.Builder.class);
+          internal_static_ExportedBlockKeysProto_descriptor =
+            getDescriptor().getMessageTypes().get(26);
+          internal_static_ExportedBlockKeysProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_ExportedBlockKeysProto_descriptor,
+              new java.lang.String[] { "IsBlockTokenEnabled", "KeyUpdateInterval", "TokenLifeTime", "CurrentKey", "AllKeys", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder.class);
+          internal_static_RecoveringBlockProto_descriptor =
+            getDescriptor().getMessageTypes().get(27);
+          internal_static_RecoveringBlockProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RecoveringBlockProto_descriptor,
+              new java.lang.String[] { "NewGenStamp", "Block", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder.class);
           return null;
         }
       };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java
new file mode 100644
index 0000000000..281a6a6670
---
/dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/InterDatanodeProtocolProtos.java @@ -0,0 +1,2516 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: InterDatanodeProtocol.proto + +package org.apache.hadoop.hdfs.protocol.proto; + +public final class InterDatanodeProtocolProtos { + private InterDatanodeProtocolProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface InitReplicaRecoveryRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RecoveringBlockProto block = 1; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder(); + } + public static final class InitReplicaRecoveryRequestProto extends + com.google.protobuf.GeneratedMessage + implements InitReplicaRecoveryRequestProtoOrBuilder { + // Use InitReplicaRecoveryRequestProto.newBuilder() to construct. + private InitReplicaRecoveryRequestProto(Builder builder) { + super(builder); + } + private InitReplicaRecoveryRequestProto(boolean noInit) {} + + private static final InitReplicaRecoveryRequestProto defaultInstance; + public static InitReplicaRecoveryRequestProto getDefaultInstance() { + return defaultInstance; + } + + public InitReplicaRecoveryRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .RecoveringBlockProto block = 1; + public static final int BLOCK_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + private void initFields() { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, block_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += 
com.google.protobuf.CodedOutputStream + .computeMessageSize(1, block_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) obj; + + boolean result = true; + result = result && (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return 
builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto 
getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance()) return this; + if (other.hasBlock()) { + mergeBlock(other.getBlock()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlock()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, 
extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .RecoveringBlockProto block = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) { + block_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; 
+ } + return blockBuilder_; + } + + // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryRequestProto) + } + + static { + defaultInstance = new InitReplicaRecoveryRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:InitReplicaRecoveryRequestProto) + } + + public interface InitReplicaRecoveryResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ReplicaState state = 1; + boolean hasState(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState(); + + // required .BlockProto block = 2; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder(); + } + public static final class InitReplicaRecoveryResponseProto extends + com.google.protobuf.GeneratedMessage + implements InitReplicaRecoveryResponseProtoOrBuilder { + // Use InitReplicaRecoveryResponseProto.newBuilder() to construct. + private InitReplicaRecoveryResponseProto(Builder builder) { + super(builder); + } + private InitReplicaRecoveryResponseProto(boolean noInit) {} + + private static final InitReplicaRecoveryResponseProto defaultInstance; + public static InitReplicaRecoveryResponseProto getDefaultInstance() { + return defaultInstance; + } + + public InitReplicaRecoveryResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .ReplicaState state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() { + return state_; + } + + // required .BlockProto block = 2; + public static final int BLOCK_FIELD_NUMBER = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + private void initFields() { + state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED; + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws 
java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, block_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, block_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, 
extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED; + bitField0_ = (bitField0_ & ~0x00000001); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasBlock()) { + 
mergeBlock(other.getBlock()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + if (!hasBlock()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 18: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ReplicaState state = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED; + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() { + return state_; + } + public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED; + onChanged(); + return this; + } + + // required .BlockProto block = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + 
public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) { + block_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; + } + return blockBuilder_; + } + + // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryResponseProto) + } + + static { + defaultInstance = new InitReplicaRecoveryResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:InitReplicaRecoveryResponseProto) + } + + public interface UpdateReplicaUnderRecoveryRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ExtendedBlockProto block = 1; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); + + // required uint64 recoveryId = 2; + boolean hasRecoveryId(); + long getRecoveryId(); + + // required uint64 newLength = 3; + boolean hasNewLength(); + long getNewLength(); + } + public static final class UpdateReplicaUnderRecoveryRequestProto extends + com.google.protobuf.GeneratedMessage + implements UpdateReplicaUnderRecoveryRequestProtoOrBuilder { + // Use UpdateReplicaUnderRecoveryRequestProto.newBuilder() to construct. 
+ private UpdateReplicaUnderRecoveryRequestProto(Builder builder) { + super(builder); + } + private UpdateReplicaUnderRecoveryRequestProto(boolean noInit) {} + + private static final UpdateReplicaUnderRecoveryRequestProto defaultInstance; + public static UpdateReplicaUnderRecoveryRequestProto getDefaultInstance() { + return defaultInstance; + } + + public UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .ExtendedBlockProto block = 1; + public static final int BLOCK_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + // required uint64 recoveryId = 2; + public static final int RECOVERYID_FIELD_NUMBER = 2; + private long recoveryId_; + public boolean hasRecoveryId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getRecoveryId() { + return recoveryId_; + } + + // required uint64 newLength = 3; + public static final int NEWLENGTH_FIELD_NUMBER = 3; + private long newLength_; + public boolean hasNewLength() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getNewLength() { + return newLength_; + } + + private void initFields() { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + recoveryId_ = 0L; + newLength_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRecoveryId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNewLength()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, block_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, recoveryId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, newLength_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, block_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, recoveryId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, newLength_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) obj; + + boolean result = true; + result = result && (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && (hasRecoveryId() == other.hasRecoveryId()); + if (hasRecoveryId()) { + result = result && (getRecoveryId() + == other.getRecoveryId()); + } + result = result && (hasNewLength() == other.hasNewLength()); + if (hasNewLength()) { + result = result && (getNewLength() + == other.getNewLength()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + if (hasRecoveryId()) { + hash = (37 * hash) + RECOVERYID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getRecoveryId()); + } + if (hasNewLength()) { + hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNewLength()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + 
private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + recoveryId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + newLength_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.recoveryId_ = recoveryId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.newLength_ = newLength_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other) { + if (other == 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance()) return this; + if (other.hasBlock()) { + mergeBlock(other.getBlock()); + } + if (other.hasRecoveryId()) { + setRecoveryId(other.getRecoveryId()); + } + if (other.hasNewLength()) { + setNewLength(other.getNewLength()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlock()) { + + return false; + } + if (!hasRecoveryId()) { + + return false; + } + if (!hasNewLength()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + recoveryId_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + newLength_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required .ExtendedBlockProto block = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + block_ != 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { + block_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; + } + return blockBuilder_; + } + + // required uint64 recoveryId = 2; + private long recoveryId_ ; + public boolean hasRecoveryId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getRecoveryId() { + return recoveryId_; + } + public Builder setRecoveryId(long value) { + bitField0_ |= 0x00000002; + recoveryId_ = value; + onChanged(); + return this; + } + public Builder clearRecoveryId() { + bitField0_ = (bitField0_ & ~0x00000002); + recoveryId_ = 0L; + onChanged(); + return this; + } + + // required uint64 newLength = 3; + private long newLength_ ; + public boolean hasNewLength() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getNewLength() { + return newLength_; + } + public Builder setNewLength(long value) { + bitField0_ |= 0x00000004; + newLength_ = value; + onChanged(); + return this; + } + public Builder clearNewLength() { + bitField0_ = (bitField0_ & ~0x00000004); + newLength_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryRequestProto) + } + + static { + defaultInstance = new UpdateReplicaUnderRecoveryRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryRequestProto) + } + + public interface UpdateReplicaUnderRecoveryResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ExtendedBlockProto block = 1; + boolean hasBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder(); + } + public static final class UpdateReplicaUnderRecoveryResponseProto extends + com.google.protobuf.GeneratedMessage + implements 
UpdateReplicaUnderRecoveryResponseProtoOrBuilder { + // Use UpdateReplicaUnderRecoveryResponseProto.newBuilder() to construct. + private UpdateReplicaUnderRecoveryResponseProto(Builder builder) { + super(builder); + } + private UpdateReplicaUnderRecoveryResponseProto(boolean noInit) {} + + private static final UpdateReplicaUnderRecoveryResponseProto defaultInstance; + public static UpdateReplicaUnderRecoveryResponseProto getDefaultInstance() { + return defaultInstance; + } + + public UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .ExtendedBlockProto block = 1; + public static final int BLOCK_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { + return block_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { + return block_; + } + + private void initFields() { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlock()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlock().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, block_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, block_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) obj; + + boolean result = true; + result = result 
&& (hasBlock() == other.hasBlock()); + if (hasBlock()) { + result = result && getBlock() + .equals(other.getBlock()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlock()) { + hash = (37 * hash) + BLOCK_FIELD_NUMBER; + hash = (53 * hash) + getBlock().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlockFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildParsed() + throws 
com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (blockBuilder_ == null) { + result.block_ = block_; + } else { + result.block_ = blockBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()) return this; + if (other.hasBlock()) { + mergeBlock(other.getBlock()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlock()) { + + return false; + } + if (!getBlock().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(); + if (hasBlock()) { + subBuilder.mergeFrom(getBlock()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlock(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ExtendedBlockProto block = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_; + public boolean hasBlock() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() { + if (blockBuilder_ == null) { + return block_; + } else { + return blockBuilder_.getMessage(); + } + } + public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { + if (blockBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + block_ = value; + onChanged(); + } else { + blockBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setBlock( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) { + if (blockBuilder_ == null) { + block_ = builderForValue.build(); + onChanged(); + } else { + blockBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) { + if (blockBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) { + block_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial(); + } else { + block_ = value; + } + onChanged(); + } else { + blockBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearBlock() { + if (blockBuilder_ == null) { + block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance(); + onChanged(); + } else { + blockBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBlockFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() { + if (blockBuilder_ != null) { + return blockBuilder_.getMessageOrBuilder(); + } else { + return block_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> + getBlockFieldBuilder() { + if (blockBuilder_ == null) { + blockBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>( + block_, + getParentForChildren(), + isClean()); + block_ = null; + } + return blockBuilder_; + } + + // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryResponseProto) + } + + static { + defaultInstance = new UpdateReplicaUnderRecoveryResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryResponseProto) + } + + public static abstract class InterDatanodeProtocolService + implements com.google.protobuf.Service { + protected InterDatanodeProtocolService() {} + + public interface Interface { + public abstract void initReplicaRecovery( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new InterDatanodeProtocolService() { + @java.lang.Override + public void initReplicaRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.initReplicaRecovery(controller, request, done); + } + + @java.lang.Override + public void updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.updateReplicaUnderRecovery(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request); + case 1: + return impl.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return 
org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void initReplicaRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(); + 
default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void initReplicaRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance())); + } + + public void updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request) + throws com.google.protobuf.ServiceException { + return 
(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_InitReplicaRecoveryRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_InitReplicaRecoveryResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" + + "o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" + + "block\030\001 \002(\0132\025.RecoveringBlockProto\"\\\n In" + + "itReplicaRecoveryResponseProto\022\034\n\005state\030" + + "\001 \002(\0162\r.ReplicaState\022\032\n\005block\030\002 \002(\0132\013.Bl" + + "ockProto\"s\n&UpdateReplicaUnderRecoveryRe" + + "questProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBloc" + + "kProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLength\030" + + "\003 \002(\004\"M\n\'UpdateReplicaUnderRecoveryRespo" + + "nseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockP", + "roto2\353\001\n\034InterDatanodeProtocolService\022Z\n" + + "\023initReplicaRecovery\022 .InitReplicaRecove" + + "ryRequestProto\032!.InitReplicaRecoveryResp" + + "onseProto\022o\n\032updateReplicaUnderRecovery\022" + + "\'.UpdateReplicaUnderRecoveryRequestProto" + + "\032(.UpdateReplicaUnderRecoveryResponsePro" + + "toBJ\n%org.apache.hadoop.hdfs.protocol.pr" + + "otoB\033InterDatanodeProtocolProtos\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_InitReplicaRecoveryRequestProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_InitReplicaRecoveryRequestProto_descriptor, + new java.lang.String[] { "Block", }, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.Builder.class); + internal_static_InitReplicaRecoveryResponseProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_InitReplicaRecoveryResponseProto_descriptor, + new java.lang.String[] { "State", "Block", }, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.Builder.class); + internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor, + new java.lang.String[] { "Block", "RecoveryId", "NewLength", }, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.Builder.class); + internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor, + new java.lang.String[] { "Block", }, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java new file mode 100644 index 0000000000..74267456b7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/JournalProtocolProtos.java @@ -0,0 +1,2234 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: JournalProtocol.proto + +package org.apache.hadoop.hdfs.protocol.proto; + +public final class JournalProtocolProtos { + private JournalProtocolProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface JournalRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + + // required uint64 firstTxnId = 2; + boolean hasFirstTxnId(); + long getFirstTxnId(); + + // required uint32 numTxns = 3; + boolean hasNumTxns(); + int getNumTxns(); + + // required bytes records = 4; + boolean hasRecords(); + com.google.protobuf.ByteString getRecords(); + } + public static final class JournalRequestProto extends + com.google.protobuf.GeneratedMessage + implements JournalRequestProtoOrBuilder { + // Use JournalRequestProto.newBuilder() to construct. + private JournalRequestProto(Builder builder) { + super(builder); + } + private JournalRequestProto(boolean noInit) {} + + private static final JournalRequestProto defaultInstance; + public static JournalRequestProto getDefaultInstance() { + return defaultInstance; + } + + public JournalRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + // required uint64 firstTxnId = 2; + public static final int FIRSTTXNID_FIELD_NUMBER = 2; + private long firstTxnId_; + public boolean hasFirstTxnId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getFirstTxnId() { + return firstTxnId_; + } + + // required uint32 numTxns = 3; + public static final int NUMTXNS_FIELD_NUMBER = 3; + private int numTxns_; + public boolean hasNumTxns() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getNumTxns() { + return numTxns_; + } + + // required bytes records = 4; + public static final int RECORDS_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString records_; + public boolean hasRecords() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public com.google.protobuf.ByteString getRecords() { + return records_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + firstTxnId_ = 
0L; + numTxns_ = 0; + records_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasFirstTxnId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNumTxns()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRecords()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, firstTxnId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, numTxns_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, records_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, firstTxnId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, numTxns_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, records_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && (hasFirstTxnId() == other.hasFirstTxnId()); + if (hasFirstTxnId()) { + result = result && (getFirstTxnId() + == other.getFirstTxnId()); + } + result = result && (hasNumTxns() == other.hasNumTxns()); + if (hasNumTxns()) { + result = result && (getNumTxns() + == other.getNumTxns()); + } + result = result && (hasRecords() == other.hasRecords()); + if (hasRecords()) { + result = result && getRecords() + .equals(other.getRecords()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + 
getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + if (hasFirstTxnId()) { + hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getFirstTxnId()); + } + if (hasNumTxns()) { + hash = (37 * hash) + NUMTXNS_FIELD_NUMBER; + hash = (53 * hash) + getNumTxns(); + } + if (hasRecords()) { + hash = (37 * hash) + RECORDS_FIELD_NUMBER; + hash = (53 * hash) + getRecords().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, 
extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + firstTxnId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + numTxns_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + records_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.firstTxnId_ = firstTxnId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.numTxns_ = numTxns_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.records_ = records_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + if (other.hasFirstTxnId()) { + setFirstTxnId(other.getFirstTxnId()); + } + if (other.hasNumTxns()) { + setNumTxns(other.getNumTxns()); + } + if (other.hasRecords()) { + setRecords(other.getRecords()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!hasFirstTxnId()) { + + return false; + } + if (!hasNumTxns()) { + + return false; + } + if (!hasRecords()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + firstTxnId_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + numTxns_ = input.readUInt32(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + records_ = 
input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if (registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // required uint64 firstTxnId = 2; + private long firstTxnId_ ; + public boolean hasFirstTxnId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getFirstTxnId() { + return firstTxnId_; + } + public Builder setFirstTxnId(long value) { + bitField0_ |= 0x00000002; + firstTxnId_ = value; + onChanged(); + return this; + } + public Builder clearFirstTxnId() { + bitField0_ = (bitField0_ & ~0x00000002); + firstTxnId_ = 0L; + onChanged(); + return this; + } + + // required uint32 numTxns = 3; + private int numTxns_ ; + public boolean hasNumTxns() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public int getNumTxns() { + return numTxns_; + } + public Builder setNumTxns(int value) { + bitField0_ |= 0x00000004; + numTxns_ = value; + onChanged(); + return this; + } + public Builder clearNumTxns() { + bitField0_ = (bitField0_ & ~0x00000004); + numTxns_ = 0; + onChanged(); + return this; + } + + // required bytes records = 4; + private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRecords() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public com.google.protobuf.ByteString getRecords() { + return records_; + } + public Builder setRecords(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + records_ = value; + onChanged(); + return this; + } + public Builder clearRecords() { + bitField0_ = (bitField0_ & ~0x00000008); + records_ = getDefaultInstance().getRecords(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:JournalRequestProto) + } + + static { + defaultInstance = new JournalRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:JournalRequestProto) + } + + public interface JournalResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class JournalResponseProto extends + com.google.protobuf.GeneratedMessage + implements JournalResponseProtoOrBuilder { + // Use JournalResponseProto.newBuilder() to construct. 
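// Editor's note -- illustrative sketch only, not part of the generated file or of this patch.
// The JournalRequestProto builder defined above follows the standard protobuf-java 2.x pattern:
// all four fields are required, so build() throws if any of them is unset. Assuming a
// hypothetical, previously obtained NamenodeRegistrationProto `reg` and an edit-log payload
// `data` (byte[]), client code might look like:
//
//   JournalRequestProto req = JournalRequestProto.newBuilder()
//       .setRegistration(reg)                                        // required field 1
//       .setFirstTxnId(101L)                                         // required uint64, first txn in the batch
//       .setNumTxns(3)                                               // required uint32
//       .setRecords(com.google.protobuf.ByteString.copyFrom(data))   // required bytes
//       .build();                                                    // throws if a required field is missing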
+ private JournalResponseProto(Builder builder) { + super(builder); + } + private JournalResponseProto(boolean noInit) {} + + private static final JournalResponseProto defaultInstance; + public static JournalResponseProto getDefaultInstance() { + return defaultInstance; + } + + public JournalResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } 
+ } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:JournalResponseProto) + } + + static { + defaultInstance = new JournalResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:JournalResponseProto) + } + + public interface StartLogSegmentRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + + // required uint64 txid = 2; + boolean hasTxid(); + long getTxid(); + } + public static final class StartLogSegmentRequestProto extends + com.google.protobuf.GeneratedMessage + implements StartLogSegmentRequestProtoOrBuilder { + // Use StartLogSegmentRequestProto.newBuilder() to construct. + private StartLogSegmentRequestProto(Builder builder) { + super(builder); + } + private StartLogSegmentRequestProto(boolean noInit) {} + + private static final StartLogSegmentRequestProto defaultInstance; + public static StartLogSegmentRequestProto getDefaultInstance() { + return defaultInstance; + } + + public StartLogSegmentRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + // required uint64 txid = 2; + public static final int TXID_FIELD_NUMBER = 2; + private long txid_; + public boolean hasTxid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getTxid() { + return txid_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + txid_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTxid()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, txid_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, txid_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && (hasTxid() == other.hasTxid()); + if (hasTxid()) { + result = result && (getTxid() + == other.getTxid()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + if (hasTxid()) { + hash = (37 * hash) + TXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTxid()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + txid_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + 
getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.txid_ = txid_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + if (other.hasTxid()) { + setTxid(other.getTxid()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!hasTxid()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + txid_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if 
(registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // required uint64 txid = 2; + private long txid_ ; + public boolean hasTxid() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getTxid() { + return txid_; + } + public Builder setTxid(long value) { + bitField0_ |= 0x00000002; + txid_ = value; + onChanged(); + return this; + } + public Builder clearTxid() { + bitField0_ = (bitField0_ & ~0x00000002); + txid_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StartLogSegmentRequestProto) + } + + static { + defaultInstance = new StartLogSegmentRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StartLogSegmentRequestProto) + } + + public interface StartLogSegmentResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class StartLogSegmentResponseProto extends + com.google.protobuf.GeneratedMessage + implements StartLogSegmentResponseProtoOrBuilder { + // Use StartLogSegmentResponseProto.newBuilder() to construct. 
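// Editor's note -- illustrative sketch only, not part of the generated file or of this patch.
// StartLogSegmentRequestProto, defined above, is built the same way; both of its required
// fields must be set before build() succeeds. With the same hypothetical `reg` as before:
//
//   StartLogSegmentRequestProto startReq = StartLogSegmentRequestProto.newBuilder()
//       .setRegistration(reg)   // required field 1
//       .setTxid(104L)          // required uint64 txid = 2
//       .build();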
+ private StartLogSegmentResponseProto(Builder builder) { + super(builder); + } + private StartLogSegmentResponseProto(boolean noInit) {} + + private static final StartLogSegmentResponseProto defaultInstance; + public static StartLogSegmentResponseProto getDefaultInstance() { + return defaultInstance; + } + + public StartLogSegmentResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + byte[] 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + 
super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:StartLogSegmentResponseProto) + } + + static { + defaultInstance = new StartLogSegmentResponseProto(true); + 
defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StartLogSegmentResponseProto) + } + + public static abstract class JournalProtocolService + implements com.google.protobuf.Service { + protected JournalProtocolService() {} + + public interface Interface { + public abstract void journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void startLogSegment( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new JournalProtocolService() { + @java.lang.Override + public void journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.journal(controller, request, done); + } + + @java.lang.Override + public void startLogSegment( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.startLogSegment(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request); + case 1: + return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for 
wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void startLogSegment( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + 
return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance())); + } + + public void startLogSegment( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_JournalRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_JournalRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_JournalResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_JournalResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StartLogSegmentRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StartLogSegmentRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StartLogSegmentResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StartLogSegmentResponseProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\025JournalProtocol.proto\032\nhdfs.proto\"}\n\023J" + + "ournalRequestProto\0220\n\014registration\030\001 \002(\013" + + "2\032.NamenodeRegistrationProto\022\022\n\nfirstTxn" + + "Id\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007records\030\004 \002" + + "(\014\"\026\n\024JournalResponseProto\"]\n\033StartLogSe" + + "gmentRequestProto\0220\n\014registration\030\001 \002(\0132" + + "\032.NamenodeRegistrationProto\022\014\n\004txid\030\002 \002(" + + "\004\"\036\n\034StartLogSegmentResponseProto2\240\001\n\026Jo" + + "urnalProtocolService\0226\n\007journal\022\024.Journa" + + "lRequestProto\032\025.JournalResponseProto\022N\n\017", + "startLogSegment\022\034.StartLogSegmentRequest" + + "Proto\032\035.StartLogSegmentResponseProtoBD\n%" + + "org.apache.hadoop.hdfs.protocol.protoB\025J" + + "ournalProtocolProtos\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_JournalRequestProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_JournalRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_JournalRequestProto_descriptor, + new java.lang.String[] { "Registration", "FirstTxnId", "NumTxns", "Records", }, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.Builder.class); + internal_static_JournalResponseProto_descriptor = + getDescriptor().getMessageTypes().get(1); + 
internal_static_JournalResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_JournalResponseProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.Builder.class); + internal_static_StartLogSegmentRequestProto_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_StartLogSegmentRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StartLogSegmentRequestProto_descriptor, + new java.lang.String[] { "Registration", "Txid", }, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.Builder.class); + internal_static_StartLogSegmentResponseProto_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_StartLogSegmentResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StartLogSegmentResponseProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java new file mode 100644 index 0000000000..f3064700d1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/proto/NamenodeProtocolProtos.java @@ -0,0 +1,8949 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: NamenodeProtocol.proto + +package org.apache.hadoop.hdfs.protocol.proto; + +public final class NamenodeProtocolProtos { + private NamenodeProtocolProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface GetBlocksRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .DatanodeIDProto datanode = 1; + boolean hasDatanode(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder(); + + // required uint64 size = 2; + boolean hasSize(); + long getSize(); + } + public static final class GetBlocksRequestProto extends + com.google.protobuf.GeneratedMessage + implements GetBlocksRequestProtoOrBuilder { + // Use GetBlocksRequestProto.newBuilder() to construct. 
+ private GetBlocksRequestProto(Builder builder) { + super(builder); + } + private GetBlocksRequestProto(boolean noInit) {} + + private static final GetBlocksRequestProto defaultInstance; + public static GetBlocksRequestProto getDefaultInstance() { + return defaultInstance; + } + + public GetBlocksRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .DatanodeIDProto datanode = 1; + public static final int DATANODE_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanode_; + public boolean hasDatanode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode() { + return datanode_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() { + return datanode_; + } + + // required uint64 size = 2; + public static final int SIZE_FIELD_NUMBER = 2; + private long size_; + public boolean hasSize() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getSize() { + return size_; + } + + private void initFields() { + datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); + size_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDatanode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSize()) { + memoizedIsInitialized = 0; + return false; + } + if (!getDatanode().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, datanode_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, size_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, datanode_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, size_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)) { + return super.equals(obj); 
+ } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto) obj; + + boolean result = true; + result = result && (hasDatanode() == other.hasDatanode()); + if (hasDatanode()) { + result = result && getDatanode() + .equals(other.getDatanode()); + } + result = result && (hasSize() == other.hasSize()); + if (hasSize()) { + result = result && (getSize() + == other.getSize()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasDatanode()) { + hash = (37 * hash) + DATANODE_FIELD_NUMBER; + hash = (53 * hash) + getDatanode().hashCode(); + } + if (hasSize()) { + hash = (37 * hash) + SIZE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSize()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getDatanodeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (datanodeBuilder_ == null) { + datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); + } else { + datanodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + size_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto buildParsed() + 
throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (datanodeBuilder_ == null) { + result.datanode_ = datanode_; + } else { + result.datanode_ = datanodeBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.size_ = size_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance()) return this; + if (other.hasDatanode()) { + mergeDatanode(other.getDatanode()); + } + if (other.hasSize()) { + setSize(other.getSize()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDatanode()) { + + return false; + } + if (!hasSize()) { + + return false; + } + if (!getDatanode().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(); + if (hasDatanode()) { + subBuilder.mergeFrom(getDatanode()); + } + input.readMessage(subBuilder, extensionRegistry); + setDatanode(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + size_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required .DatanodeIDProto datanode = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeBuilder_; + public boolean hasDatanode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanode() { + if (datanodeBuilder_ == null) { + return datanode_; + } else { + return datanodeBuilder_.getMessage(); + } + } + public Builder setDatanode(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + datanode_ = value; + onChanged(); + } else { + datanodeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setDatanode( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) { + if (datanodeBuilder_ == null) { + datanode_ = builderForValue.build(); + onChanged(); + } else { + datanodeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeDatanode(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) { + if (datanodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + datanode_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) { + datanode_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanode_).mergeFrom(value).buildPartial(); + } else { + datanode_ = value; + } + onChanged(); + } else { + datanodeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearDatanode() { + if (datanodeBuilder_ == null) { + datanode_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance(); + onChanged(); + } else { + datanodeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getDatanodeFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeOrBuilder() { + if (datanodeBuilder_ != null) { + return datanodeBuilder_.getMessageOrBuilder(); + } else { + return datanode_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> + getDatanodeFieldBuilder() { + if (datanodeBuilder_ == null) { + datanodeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>( + datanode_, + getParentForChildren(), + isClean()); + datanode_ = null; + } + return datanodeBuilder_; + } + + // required uint64 size = 2; + private long size_ ; + public boolean hasSize() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getSize() { + return size_; + } + public Builder setSize(long value) { + bitField0_ |= 0x00000002; + size_ = value; + onChanged(); + return this; + } + public Builder clearSize() { + bitField0_ = (bitField0_ & ~0x00000002); + size_ = 0L; + 
onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetBlocksRequestProto) + } + + static { + defaultInstance = new GetBlocksRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetBlocksRequestProto) + } + + public interface GetBlocksResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .BlockWithLocationsProto blocks = 1; + boolean hasBlocks(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder(); + } + public static final class GetBlocksResponseProto extends + com.google.protobuf.GeneratedMessage + implements GetBlocksResponseProtoOrBuilder { + // Use GetBlocksResponseProto.newBuilder() to construct. + private GetBlocksResponseProto(Builder builder) { + super(builder); + } + private GetBlocksResponseProto(boolean noInit) {} + + private static final GetBlocksResponseProto defaultInstance; + public static GetBlocksResponseProto getDefaultInstance() { + return defaultInstance; + } + + public GetBlocksResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .BlockWithLocationsProto blocks = 1; + public static final int BLOCKS_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto blocks_; + public boolean hasBlocks() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks() { + return blocks_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder() { + return blocks_; + } + + private void initFields() { + blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBlocks()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBlocks().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, blocks_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, blocks_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object 
writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) obj; + + boolean result = true; + result = result && (hasBlocks() == other.hasBlocks()); + if (hasBlocks()) { + result = result && getBlocks() + .equals(other.getBlocks()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBlocks()) { + hash = (37 * hash) + BLOCKS_FIELD_NUMBER; + hash = (53 * hash) + getBlocks().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } 
else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlocksResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBlocksFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (blocksBuilder_ == null) { + blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); + } else { + blocksBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto buildParsed() 
+ throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (blocksBuilder_ == null) { + result.blocks_ = blocks_; + } else { + result.blocks_ = blocksBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance()) return this; + if (other.hasBlocks()) { + mergeBlocks(other.getBlocks()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBlocks()) { + + return false; + } + if (!getBlocks().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder(); + if (hasBlocks()) { + subBuilder.mergeFrom(getBlocks()); + } + input.readMessage(subBuilder, extensionRegistry); + setBlocks(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .BlockWithLocationsProto blocks = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> blocksBuilder_; + public boolean hasBlocks() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + 
public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto getBlocks() { + if (blocksBuilder_ == null) { + return blocks_; + } else { + return blocksBuilder_.getMessage(); + } + } + public Builder setBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { + if (blocksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + blocks_ = value; + onChanged(); + } else { + blocksBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setBlocks( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder builderForValue) { + if (blocksBuilder_ == null) { + blocks_ = builderForValue.build(); + onChanged(); + } else { + blocksBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto value) { + if (blocksBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + blocks_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance()) { + blocks_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.newBuilder(blocks_).mergeFrom(value).buildPartial(); + } else { + blocks_ = value; + } + onChanged(); + } else { + blocksBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearBlocks() { + if (blocksBuilder_ == null) { + blocks_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.getDefaultInstance(); + onChanged(); + } else { + blocksBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder getBlocksBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBlocksFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder getBlocksOrBuilder() { + if (blocksBuilder_ != null) { + return blocksBuilder_.getMessageOrBuilder(); + } else { + return blocks_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder> + getBlocksFieldBuilder() { + if (blocksBuilder_ == null) { + blocksBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProtoOrBuilder>( + blocks_, + getParentForChildren(), + isClean()); + blocks_ = null; + } + return blocksBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetBlocksResponseProto) + } + + static { + defaultInstance = new GetBlocksResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetBlocksResponseProto) + } + + public interface GetBlockKeysRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class GetBlockKeysRequestProto extends + com.google.protobuf.GeneratedMessage + implements GetBlockKeysRequestProtoOrBuilder { + // Use GetBlockKeysRequestProto.newBuilder() to construct. 
+ private GetBlockKeysRequestProto(Builder builder) { + super(builder); + } + private GetBlockKeysRequestProto(boolean noInit) {} + + private static final GetBlockKeysRequestProto defaultInstance; + public static GetBlockKeysRequestProto getDefaultInstance() { + return defaultInstance; + } + + public GetBlockKeysRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + 
} + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:GetBlockKeysRequestProto) + } + + static { + defaultInstance = new GetBlockKeysRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetBlockKeysRequestProto) + } + + 
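Editor's note (not part of the patch or of the generated sources): the generated message and service classes above follow the standard protobuf 2.x Java API -- immutable messages built through nested Builders, parseFrom()/writeTo() for the wire format, and newStub()/newBlockingStub() wrappers around an RpcChannel. The sketch below illustrates how client code would typically exercise these classes. It is hand-written for illustration only; the datanodeId value and the BlockingRpcChannel are assumed to be supplied by the surrounding HDFS RPC layer, and passing a null RpcController is purely a simplification for the sketch.

// Illustrative sketch only -- hand-written by the editor, NOT generated code
// and NOT part of this patch.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;

public class GeneratedProtoUsageSketch {

  // Build a GetBlocksRequestProto and round-trip it through its wire format.
  // datanodeId is assumed to be a fully populated DatanodeIDProto; build()
  // throws UninitializedMessageException if any required field is missing.
  static GetBlocksRequestProto roundTrip(HdfsProtos.DatanodeIDProto datanodeId)
      throws com.google.protobuf.InvalidProtocolBufferException {
    GetBlocksRequestProto request = GetBlocksRequestProto.newBuilder()
        .setDatanode(datanodeId)      // required field 1
        .setSize(64L * 1024 * 1024)   // required field 2: amount of block data requested
        .build();
    // Serialize and parse back, as an RPC engine would do on the wire.
    return GetBlocksRequestProto.parseFrom(request.toByteString());
  }

  // Invoke the generated blocking stub for JournalProtocolService. The
  // BlockingRpcChannel is assumed to come from the RPC layer; a null
  // RpcController is a simplification for this sketch.
  static JournalProtocolProtos.JournalResponseProto callJournal(
      com.google.protobuf.BlockingRpcChannel channel,
      JournalProtocolProtos.JournalRequestProto request)
      throws com.google.protobuf.ServiceException {
    JournalProtocolProtos.JournalProtocolService.BlockingInterface stub =
        JournalProtocolProtos.JournalProtocolService.newBlockingStub(channel);
    return stub.journal(null, request);
  }
}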
public interface GetBlockKeysResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ExportedBlockKeysProto keys = 1; + boolean hasKeys(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder(); + } + public static final class GetBlockKeysResponseProto extends + com.google.protobuf.GeneratedMessage + implements GetBlockKeysResponseProtoOrBuilder { + // Use GetBlockKeysResponseProto.newBuilder() to construct. + private GetBlockKeysResponseProto(Builder builder) { + super(builder); + } + private GetBlockKeysResponseProto(boolean noInit) {} + + private static final GetBlockKeysResponseProto defaultInstance; + public static GetBlockKeysResponseProto getDefaultInstance() { + return defaultInstance; + } + + public GetBlockKeysResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .ExportedBlockKeysProto keys = 1; + public static final int KEYS_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_; + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { + return keys_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { + return keys_; + } + + private void initFields() { + keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKeys()) { + memoizedIsInitialized = 0; + return false; + } + if (!getKeys().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, keys_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, keys_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) obj; + + boolean result = true; + result = result && (hasKeys() == other.hasKeys()); + if (hasKeys()) { + result = result && getKeys() + .equals(other.getKeys()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasKeys()) { + hash = (37 * hash) + KEYS_FIELD_NUMBER; + hash = (53 * hash) + getKeys().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetBlockKeysResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getKeysFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (keysBuilder_ == null) { + keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (keysBuilder_ == null) { + result.keys_ = keys_; + } else { + result.keys_ = keysBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance()) return this; + if (other.hasKeys()) { + mergeKeys(other.getKeys()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKeys()) { + + return false; + } + if (!getKeys().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(); + if (hasKeys()) { + subBuilder.mergeFrom(getKeys()); + } + input.readMessage(subBuilder, extensionRegistry); + setKeys(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .ExportedBlockKeysProto keys = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_; + public boolean hasKeys() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() { + if (keysBuilder_ == null) { + return keys_; + } else { + return 
keysBuilder_.getMessage(); + } + } + public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { + if (keysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + keys_ = value; + onChanged(); + } else { + keysBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setKeys( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) { + if (keysBuilder_ == null) { + keys_ = builderForValue.build(); + onChanged(); + } else { + keysBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) { + if (keysBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) { + keys_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial(); + } else { + keys_ = value; + } + onChanged(); + } else { + keysBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearKeys() { + if (keysBuilder_ == null) { + keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance(); + onChanged(); + } else { + keysBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getKeysFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() { + if (keysBuilder_ != null) { + return keysBuilder_.getMessageOrBuilder(); + } else { + return keys_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> + getKeysFieldBuilder() { + if (keysBuilder_ == null) { + keysBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>( + keys_, + getParentForChildren(), + isClean()); + keys_ = null; + } + return keysBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetBlockKeysResponseProto) + } + + static { + defaultInstance = new GetBlockKeysResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetBlockKeysResponseProto) + } + + public interface GetTransactionIdRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class GetTransactionIdRequestProto extends + com.google.protobuf.GeneratedMessage + implements GetTransactionIdRequestProtoOrBuilder { + // Use GetTransactionIdRequestProto.newBuilder() to construct. 
+ private GetTransactionIdRequestProto(Builder builder) { + super(builder); + } + private GetTransactionIdRequestProto(boolean noInit) {} + + private static final GetTransactionIdRequestProto defaultInstance; + public static GetTransactionIdRequestProto getDefaultInstance() { + return defaultInstance; + } + + public GetTransactionIdRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdRequestProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + 
byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) 
{ + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:GetTransactionIdRequestProto) + } + + static { + defaultInstance = new 
GetTransactionIdRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTransactionIdRequestProto) + } + + public interface GetTransactionIdResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 txId = 1; + boolean hasTxId(); + long getTxId(); + } + public static final class GetTransactionIdResponseProto extends + com.google.protobuf.GeneratedMessage + implements GetTransactionIdResponseProtoOrBuilder { + // Use GetTransactionIdResponseProto.newBuilder() to construct. + private GetTransactionIdResponseProto(Builder builder) { + super(builder); + } + private GetTransactionIdResponseProto(boolean noInit) {} + + private static final GetTransactionIdResponseProto defaultInstance; + public static GetTransactionIdResponseProto getDefaultInstance() { + return defaultInstance; + } + + public GetTransactionIdResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 txId = 1; + public static final int TXID_FIELD_NUMBER = 1; + private long txId_; + public boolean hasTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getTxId() { + return txId_; + } + + private void initFields() { + txId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTxId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, txId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, txId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) obj; + + boolean result = true; + result = result && (hasTxId() == other.hasTxId()); + if (hasTxId()) { + result = result && (getTxId() + == other.getTxId()); + } + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTxId()) { + hash = (37 * hash) + TXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTxId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetTransactionIdResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + txId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + 
to_bitField0_ |= 0x00000001; + } + result.txId_ = txId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance()) return this; + if (other.hasTxId()) { + setTxId(other.getTxId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTxId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + txId_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 txId = 1; + private long txId_ ; + public boolean hasTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getTxId() { + return txId_; + } + public Builder setTxId(long value) { + bitField0_ |= 0x00000001; + txId_ = value; + onChanged(); + return this; + } + public Builder clearTxId() { + bitField0_ = (bitField0_ & ~0x00000001); + txId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetTransactionIdResponseProto) + } + + static { + defaultInstance = new GetTransactionIdResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTransactionIdResponseProto) + } + + public interface RollEditLogRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class RollEditLogRequestProto extends + com.google.protobuf.GeneratedMessage + implements RollEditLogRequestProtoOrBuilder { + // Use RollEditLogRequestProto.newBuilder() to construct. 
+ private RollEditLogRequestProto(Builder builder) { + super(builder); + } + private RollEditLogRequestProto(boolean noInit) {} + + private static final RollEditLogRequestProto defaultInstance; + public static RollEditLogRequestProto getDefaultInstance() { + return defaultInstance; + } + + public RollEditLogRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogRequestProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { 
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:RollEditLogRequestProto) + } + + static { + defaultInstance = new RollEditLogRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RollEditLogRequestProto) + } + + public interface RollEditLogResponseProtoOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required .CheckpointSignatureProto signature = 1; + boolean hasSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); + } + public static final class RollEditLogResponseProto extends + com.google.protobuf.GeneratedMessage + implements RollEditLogResponseProtoOrBuilder { + // Use RollEditLogResponseProto.newBuilder() to construct. + private RollEditLogResponseProto(Builder builder) { + super(builder); + } + private RollEditLogResponseProto(boolean noInit) {} + + private static final RollEditLogResponseProto defaultInstance; + public static RollEditLogResponseProto getDefaultInstance() { + return defaultInstance; + } + + public RollEditLogResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .CheckpointSignatureProto signature = 1; + public static final int SIGNATURE_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + return signature_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + return signature_; + } + + private void initFields() { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSignature()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSignature().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, signature_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, signature_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) obj; + + boolean result = true; + result = result && (hasSignature() == other.hasSignature()); + if (hasSignature()) { + result = result && getSignature() + .equals(other.getSignature()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSignature()) { + hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; + hash = (53 * hash) + getSignature().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) 
+ throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RollEditLogResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSignatureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } else { + signatureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (signatureBuilder_ == null) { + result.signature_ = signature_; + } else { + result.signature_ = signatureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance()) return this; + if (other.hasSignature()) { + mergeSignature(other.getSignature()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSignature()) { + + return false; + } + if (!getSignature().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(); + if (hasSignature()) { + subBuilder.mergeFrom(getSignature()); + } + input.readMessage(subBuilder, extensionRegistry); + setSignature(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .CheckpointSignatureProto signature = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + if (signatureBuilder_ == null) { + return signature_; + } else { + return signatureBuilder_.getMessage(); + } + } + public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + signature_ = value; + onChanged(); + } else { + signatureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setSignature( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder builderForValue) { + if (signatureBuilder_ == null) { + signature_ = builderForValue.build(); + onChanged(); + } else { + signatureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) { + signature_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); + } else { + signature_ = value; + } + onChanged(); + } else { + signatureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearSignature() { + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + onChanged(); + } else { + signatureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSignatureFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + if (signatureBuilder_ != null) { + return signatureBuilder_.getMessageOrBuilder(); + } else { + return signature_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> + getSignatureFieldBuilder() { + if (signatureBuilder_ == null) { + signatureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder>( + signature_, + getParentForChildren(), + isClean()); + signature_ = null; + } + return signatureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RollEditLogResponseProto) + } + + static { + defaultInstance = new RollEditLogResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RollEditLogResponseProto) + } + + public interface ErrorReportRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registartion = 1; + boolean hasRegistartion(); + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistartion(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistartionOrBuilder(); + + // required uint32 errorCode = 2; + boolean hasErrorCode(); + int getErrorCode(); + + // required string msg = 3; + boolean hasMsg(); + String getMsg(); + } + public static final class ErrorReportRequestProto extends + com.google.protobuf.GeneratedMessage + implements ErrorReportRequestProtoOrBuilder { + // Use ErrorReportRequestProto.newBuilder() to construct. + private ErrorReportRequestProto(Builder builder) { + super(builder); + } + private ErrorReportRequestProto(boolean noInit) {} + + private static final ErrorReportRequestProto defaultInstance; + public static ErrorReportRequestProto getDefaultInstance() { + return defaultInstance; + } + + public ErrorReportRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registartion = 1; + public static final int REGISTARTION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registartion_; + public boolean hasRegistartion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistartion() { + return registartion_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistartionOrBuilder() { + return registartion_; + } + + // required uint32 errorCode = 2; + public static final int ERRORCODE_FIELD_NUMBER = 2; + private int errorCode_; + public boolean hasErrorCode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getErrorCode() { + return errorCode_; + } + + // required string msg = 3; + public static final int MSG_FIELD_NUMBER = 3; + private java.lang.Object msg_; + public boolean hasMsg() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getMsg() { + java.lang.Object ref = msg_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + msg_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getMsgBytes() { + java.lang.Object ref = msg_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + msg_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + registartion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + errorCode_ = 0; + msg_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistartion()) { + memoizedIsInitialized = 0; + return 
false; + } + if (!hasErrorCode()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMsg()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistartion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registartion_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, errorCode_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getMsgBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registartion_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, errorCode_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getMsgBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) obj; + + boolean result = true; + result = result && (hasRegistartion() == other.hasRegistartion()); + if (hasRegistartion()) { + result = result && getRegistartion() + .equals(other.getRegistartion()); + } + result = result && (hasErrorCode() == other.hasErrorCode()); + if (hasErrorCode()) { + result = result && (getErrorCode() + == other.getErrorCode()); + } + result = result && (hasMsg() == other.hasMsg()); + if (hasMsg()) { + result = result && getMsg() + .equals(other.getMsg()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistartion()) { + hash = (37 * hash) + REGISTARTION_FIELD_NUMBER; + hash = (53 * hash) + getRegistartion().hashCode(); + } + if (hasErrorCode()) { + hash = (37 * hash) + ERRORCODE_FIELD_NUMBER; + hash = (53 * hash) + getErrorCode(); + } + if (hasMsg()) { + hash = (37 * hash) + MSG_FIELD_NUMBER; + hash = (53 * hash) + getMsg().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProtoOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistartionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registartionBuilder_ == null) { + registartion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registartionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + errorCode_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + msg_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registartionBuilder_ == null) { + result.registartion_ = registartion_; + } else { + result.registartion_ = registartionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.errorCode_ = errorCode_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.msg_ = msg_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + 
} + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this; + if (other.hasRegistartion()) { + mergeRegistartion(other.getRegistartion()); + } + if (other.hasErrorCode()) { + setErrorCode(other.getErrorCode()); + } + if (other.hasMsg()) { + setMsg(other.getMsg()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistartion()) { + + return false; + } + if (!hasErrorCode()) { + + return false; + } + if (!hasMsg()) { + + return false; + } + if (!getRegistartion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistartion()) { + subBuilder.mergeFrom(getRegistartion()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistartion(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + errorCode_ = input.readUInt32(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + msg_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registartion = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registartion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registartionBuilder_; + public boolean hasRegistartion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistartion() { + if (registartionBuilder_ == null) { + return registartion_; + } else { + return registartionBuilder_.getMessage(); + } + } + public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registartionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registartion_ = value; + 
onChanged(); + } else { + registartionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistartion( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registartionBuilder_ == null) { + registartion_ = builderForValue.build(); + onChanged(); + } else { + registartionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registartionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registartion_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registartion_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registartion_).mergeFrom(value).buildPartial(); + } else { + registartion_ = value; + } + onChanged(); + } else { + registartionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistartion() { + if (registartionBuilder_ == null) { + registartion_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registartionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistartionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistartionFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistartionOrBuilder() { + if (registartionBuilder_ != null) { + return registartionBuilder_.getMessageOrBuilder(); + } else { + return registartion_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistartionFieldBuilder() { + if (registartionBuilder_ == null) { + registartionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registartion_, + getParentForChildren(), + isClean()); + registartion_ = null; + } + return registartionBuilder_; + } + + // required uint32 errorCode = 2; + private int errorCode_ ; + public boolean hasErrorCode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getErrorCode() { + return errorCode_; + } + public Builder setErrorCode(int value) { + bitField0_ |= 0x00000002; + errorCode_ = value; + onChanged(); + return this; + } + public Builder clearErrorCode() { + bitField0_ = (bitField0_ & ~0x00000002); + errorCode_ = 0; + onChanged(); + return this; + } + + // required string msg = 3; + private java.lang.Object msg_ = ""; + public boolean hasMsg() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public String getMsg() { + java.lang.Object ref = msg_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + msg_ = s; + return s; + } else { + return (String) ref; + } + 
} + public Builder setMsg(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + msg_ = value; + onChanged(); + return this; + } + public Builder clearMsg() { + bitField0_ = (bitField0_ & ~0x00000004); + msg_ = getDefaultInstance().getMsg(); + onChanged(); + return this; + } + void setMsg(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000004; + msg_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:ErrorReportRequestProto) + } + + static { + defaultInstance = new ErrorReportRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ErrorReportRequestProto) + } + + public interface ErrorReportResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ErrorReportResponseProto extends + com.google.protobuf.GeneratedMessage + implements ErrorReportResponseProtoOrBuilder { + // Use ErrorReportResponseProto.newBuilder() to construct. + private ErrorReportResponseProto(Builder builder) { + super(builder); + } + private ErrorReportResponseProto(boolean noInit) {} + + private static final ErrorReportResponseProto defaultInstance; + public static ErrorReportResponseProto getDefaultInstance() { + return defaultInstance; + } + + public ErrorReportResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder 
= new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + 
+ public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ErrorReportResponseProto) + } + + static { + defaultInstance = new ErrorReportResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ErrorReportResponseProto) + } + + public interface RegisterRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + } + public static final class RegisterRequestProto extends + com.google.protobuf.GeneratedMessage + implements RegisterRequestProtoOrBuilder { + // Use RegisterRequestProto.newBuilder() to construct. + private RegisterRequestProto(Builder builder) { + super(builder); + } + private RegisterRequestProto(boolean noInit) {} + + private static final RegisterRequestProto defaultInstance; + public static RegisterRequestProto getDefaultInstance() { + return defaultInstance; + } + + public RegisterRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return 
true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if (registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegisterRequestProto) + } + + static { + defaultInstance = new RegisterRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegisterRequestProto) + } + + public interface RegisterResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + } + public static final class RegisterResponseProto extends + com.google.protobuf.GeneratedMessage + implements RegisterResponseProtoOrBuilder { + // Use RegisterResponseProto.newBuilder() to construct. + private RegisterResponseProto(Builder builder) { + super(builder); + } + private RegisterResponseProto(boolean noInit) {} + + private static final RegisterResponseProto defaultInstance; + public static RegisterResponseProto getDefaultInstance() { + return defaultInstance; + } + + public RegisterResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_RegisterResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + 
getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if (registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegisterResponseProto) + } + + static { + defaultInstance = new RegisterResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegisterResponseProto) + } + + public interface StartCheckpointRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + } + public static final class StartCheckpointRequestProto extends + com.google.protobuf.GeneratedMessage + implements StartCheckpointRequestProtoOrBuilder { + // Use StartCheckpointRequestProto.newBuilder() to construct. + private StartCheckpointRequestProto(Builder builder) { + super(builder); + } + private StartCheckpointRequestProto(boolean noInit) {} + + private static final StartCheckpointRequestProto defaultInstance; + public static StartCheckpointRequestProto getDefaultInstance() { + return defaultInstance; + } + + public StartCheckpointRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + 
memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + 
this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if (registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:StartCheckpointRequestProto) + } + + static { + defaultInstance = new StartCheckpointRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StartCheckpointRequestProto) + } + + public interface StartCheckpointResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeCommandProto command = 1; + boolean hasCommand(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder(); + } + public static final class StartCheckpointResponseProto extends + com.google.protobuf.GeneratedMessage + implements StartCheckpointResponseProtoOrBuilder { + // Use StartCheckpointResponseProto.newBuilder() to construct. 
+ private StartCheckpointResponseProto(Builder builder) { + super(builder); + } + private StartCheckpointResponseProto(boolean noInit) {} + + private static final StartCheckpointResponseProto defaultInstance; + public static StartCheckpointResponseProto getDefaultInstance() { + return defaultInstance; + } + + public StartCheckpointResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeCommandProto command = 1; + public static final int COMMAND_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto command_; + public boolean hasCommand() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand() { + return command_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { + return command_; + } + + private void initFields() { + command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCommand()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCommand().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, command_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, command_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) obj; + + boolean result = true; + result = result && (hasCommand() == other.hasCommand()); + if (hasCommand()) { + result = result && getCommand() + .equals(other.getCommand()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + 
} + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasCommand()) { + hash = (37 * hash) + COMMAND_FIELD_NUMBER; + hash = (53 * hash) + getCommand().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + 
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_StartCheckpointResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCommandFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (commandBuilder_ == null) { + command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); + } else { + commandBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto(this); + int from_bitField0_ = 
bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (commandBuilder_ == null) { + result.command_ = command_; + } else { + result.command_ = commandBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()) return this; + if (other.hasCommand()) { + mergeCommand(other.getCommand()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCommand()) { + + return false; + } + if (!getCommand().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder(); + if (hasCommand()) { + subBuilder.mergeFrom(getCommand()); + } + input.readMessage(subBuilder, extensionRegistry); + setCommand(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeCommandProto command = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder> commandBuilder_; + public boolean hasCommand() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto getCommand() { + if (commandBuilder_ == null) { + return command_; + } else { + return commandBuilder_.getMessage(); + } + } + public Builder setCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto value) { + if (commandBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + command_ = value; + onChanged(); + } else { + commandBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setCommand( + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder builderForValue) { + if (commandBuilder_ == null) { + command_ = builderForValue.build(); + onChanged(); + } else { + commandBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeCommand(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto value) { + if (commandBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + command_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance()) { + command_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.newBuilder(command_).mergeFrom(value).buildPartial(); + } else { + command_ = value; + } + onChanged(); + } else { + commandBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearCommand() { + if (commandBuilder_ == null) { + command_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.getDefaultInstance(); + onChanged(); + } else { + commandBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder getCommandBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCommandFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder getCommandOrBuilder() { + if (commandBuilder_ != null) { + return commandBuilder_.getMessageOrBuilder(); + } else { + return command_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder> + getCommandFieldBuilder() { + if (commandBuilder_ == null) { + commandBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProtoOrBuilder>( + command_, + getParentForChildren(), + isClean()); + command_ = null; + } + return commandBuilder_; + } + + // @@protoc_insertion_point(builder_scope:StartCheckpointResponseProto) + } + + static { + defaultInstance = new StartCheckpointResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StartCheckpointResponseProto) + } + + public interface EndCheckpointRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamenodeRegistrationProto registration = 1; + boolean hasRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder(); + + // required .CheckpointSignatureProto signature = 2; + boolean hasSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder(); + } + public static final class EndCheckpointRequestProto extends + com.google.protobuf.GeneratedMessage + implements EndCheckpointRequestProtoOrBuilder { + // Use EndCheckpointRequestProto.newBuilder() to construct. 
+ private EndCheckpointRequestProto(Builder builder) { + super(builder); + } + private EndCheckpointRequestProto(boolean noInit) {} + + private static final EndCheckpointRequestProto defaultInstance; + public static EndCheckpointRequestProto getDefaultInstance() { + return defaultInstance; + } + + public EndCheckpointRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required .NamenodeRegistrationProto registration = 1; + public static final int REGISTRATION_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + return registration_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + return registration_; + } + + // required .CheckpointSignatureProto signature = 2; + public static final int SIGNATURE_FIELD_NUMBER = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + return signature_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + return signature_; + } + + private void initFields() { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegistration()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSignature()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegistration().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSignature().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, registration_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, signature_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, registration_); + } + if (((bitField0_ & 
0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, signature_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) obj; + + boolean result = true; + result = result && (hasRegistration() == other.hasRegistration()); + if (hasRegistration()) { + result = result && getRegistration() + .equals(other.getRegistration()); + } + result = result && (hasSignature() == other.hasSignature()); + if (hasSignature()) { + result = result && getSignature() + .equals(other.getSignature()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegistration()) { + hash = (37 * hash) + REGISTRATION_FIELD_NUMBER; + hash = (53 * hash) + getRegistration().hashCode(); + } + if (hasSignature()) { + hash = (37 * hash) + SIGNATURE_FIELD_NUMBER; + hash = (53 * hash) + getSignature().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public 
static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegistrationFieldBuilder(); + getSignatureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + } else { + signatureBuilder_.clear(); + } + 
bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (registrationBuilder_ == null) { + result.registration_ = registration_; + } else { + result.registration_ = registrationBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (signatureBuilder_ == null) { + result.signature_ = signature_; + } else { + result.signature_ = signatureBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance()) return this; + if (other.hasRegistration()) { + mergeRegistration(other.getRegistration()); + } + if (other.hasSignature()) { + mergeSignature(other.getSignature()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegistration()) { + + return false; + } + if (!hasSignature()) { + + return false; + } + if (!getRegistration().isInitialized()) { + + return false; + } + if (!getSignature().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(); + if (hasRegistration()) { + subBuilder.mergeFrom(getRegistration()); + } + input.readMessage(subBuilder, extensionRegistry); + setRegistration(subBuilder.buildPartial()); + break; + } + case 18: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(); + if (hasSignature()) { + subBuilder.mergeFrom(getSignature()); + } + input.readMessage(subBuilder, extensionRegistry); + setSignature(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamenodeRegistrationProto registration = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_; + public boolean hasRegistration() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() { + if (registrationBuilder_ == null) { + return registration_; + } else { + return registrationBuilder_.getMessage(); + } + } + public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registration_ = value; + onChanged(); + } else { + registrationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegistration( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) { + if (registrationBuilder_ == null) { + registration_ = builderForValue.build(); + onChanged(); + } else { + registrationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) { + if (registrationBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) { + registration_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial(); + } else { + registration_ = value; + } + onChanged(); + } else { + registrationBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + 
public Builder clearRegistration() { + if (registrationBuilder_ == null) { + registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance(); + onChanged(); + } else { + registrationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegistrationFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() { + if (registrationBuilder_ != null) { + return registrationBuilder_.getMessageOrBuilder(); + } else { + return registration_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> + getRegistrationFieldBuilder() { + if (registrationBuilder_ == null) { + registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>( + registration_, + getParentForChildren(), + isClean()); + registration_ = null; + } + return registrationBuilder_; + } + + // required .CheckpointSignatureProto signature = 2; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> signatureBuilder_; + public boolean hasSignature() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto getSignature() { + if (signatureBuilder_ == null) { + return signature_; + } else { + return signatureBuilder_.getMessage(); + } + } + public Builder setSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + signature_ = value; + onChanged(); + } else { + signatureBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setSignature( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder builderForValue) { + if (signatureBuilder_ == null) { + signature_ = builderForValue.build(); + onChanged(); + } else { + signatureBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeSignature(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto value) { + if (signatureBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + signature_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance()) { + signature_ = + 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.newBuilder(signature_).mergeFrom(value).buildPartial(); + } else { + signature_ = value; + } + onChanged(); + } else { + signatureBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder clearSignature() { + if (signatureBuilder_ == null) { + signature_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.getDefaultInstance(); + onChanged(); + } else { + signatureBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder getSignatureBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSignatureFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder getSignatureOrBuilder() { + if (signatureBuilder_ != null) { + return signatureBuilder_.getMessageOrBuilder(); + } else { + return signature_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder> + getSignatureFieldBuilder() { + if (signatureBuilder_ == null) { + signatureBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProtoOrBuilder>( + signature_, + getParentForChildren(), + isClean()); + signature_ = null; + } + return signatureBuilder_; + } + + // @@protoc_insertion_point(builder_scope:EndCheckpointRequestProto) + } + + static { + defaultInstance = new EndCheckpointRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EndCheckpointRequestProto) + } + + public interface EndCheckpointResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class EndCheckpointResponseProto extends + com.google.protobuf.GeneratedMessage + implements EndCheckpointResponseProtoOrBuilder { + // Use EndCheckpointResponseProto.newBuilder() to construct. 
+ private EndCheckpointResponseProto(Builder builder) { + super(builder); + } + private EndCheckpointResponseProto(boolean noInit) {} + + private static final EndCheckpointResponseProto defaultInstance; + public static EndCheckpointResponseProto getDefaultInstance() { + return defaultInstance; + } + + public EndCheckpointResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + byte[] data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_EndCheckpointResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:EndCheckpointResponseProto) + } + + static { + defaultInstance = new EndCheckpointResponseProto(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:EndCheckpointResponseProto) + } + + public interface GetEditLogManifestRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 sinceTxId = 1; + boolean hasSinceTxId(); + long getSinceTxId(); + } + public static final class GetEditLogManifestRequestProto extends + com.google.protobuf.GeneratedMessage + implements GetEditLogManifestRequestProtoOrBuilder { + // Use GetEditLogManifestRequestProto.newBuilder() to construct. + private GetEditLogManifestRequestProto(Builder builder) { + super(builder); + } + private GetEditLogManifestRequestProto(boolean noInit) {} + + private static final GetEditLogManifestRequestProto defaultInstance; + public static GetEditLogManifestRequestProto getDefaultInstance() { + return defaultInstance; + } + + public GetEditLogManifestRequestProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; + } + + private int bitField0_; + // required uint64 sinceTxId = 1; + public static final int SINCETXID_FIELD_NUMBER = 1; + private long sinceTxId_; + public boolean hasSinceTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getSinceTxId() { + return sinceTxId_; + } + + private void initFields() { + sinceTxId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSinceTxId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, sinceTxId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, sinceTxId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) obj; + + boolean result = true; + result = result && (hasSinceTxId() == other.hasSinceTxId()); + if (hasSinceTxId()) { + result = result && (getSinceTxId() + == other.getSinceTxId()); + } + result 
= result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSinceTxId()) { + hash = (37 * hash) + SINCETXID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSinceTxId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder 
newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + sinceTxId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto buildPartial() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.sinceTxId_ = sinceTxId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this; + if (other.hasSinceTxId()) { + setSinceTxId(other.getSinceTxId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSinceTxId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + sinceTxId_ = input.readUInt64(); + break; + } + } + } + } + + private int bitField0_; + + // required uint64 sinceTxId = 1; + private long sinceTxId_ ; + public boolean hasSinceTxId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getSinceTxId() { + return sinceTxId_; + } + public Builder setSinceTxId(long value) { + bitField0_ |= 0x00000001; + sinceTxId_ = value; + onChanged(); + return this; + } + public Builder clearSinceTxId() { + bitField0_ = (bitField0_ & ~0x00000001); + sinceTxId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetEditLogManifestRequestProto) + } + + static { + defaultInstance = new GetEditLogManifestRequestProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetEditLogManifestRequestProto) + } + + public interface GetEditLogManifestResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RemoteEditLogManifestProto manifest = 1; + boolean hasManifest(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest(); + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder(); + } + public static final class GetEditLogManifestResponseProto extends + com.google.protobuf.GeneratedMessage + implements GetEditLogManifestResponseProtoOrBuilder { + // Use GetEditLogManifestResponseProto.newBuilder() to construct. 
+ private GetEditLogManifestResponseProto(Builder builder) { + super(builder); + } + private GetEditLogManifestResponseProto(boolean noInit) {} + + private static final GetEditLogManifestResponseProto defaultInstance; + public static GetEditLogManifestResponseProto getDefaultInstance() { + return defaultInstance; + } + + public GetEditLogManifestResponseProto getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; + } + + private int bitField0_; + // required .RemoteEditLogManifestProto manifest = 1; + public static final int MANIFEST_FIELD_NUMBER = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_; + public boolean hasManifest() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() { + return manifest_; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { + return manifest_; + } + + private void initFields() { + manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasManifest()) { + memoizedIsInitialized = 0; + return false; + } + if (!getManifest().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, manifest_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, manifest_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)) { + return super.equals(obj); + } + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) obj; + + boolean result = true; + result = result && (hasManifest() == other.hasManifest()); + if (hasManifest()) { + result = result && getManifest() + .equals(other.getManifest()); + } + result = 
result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasManifest()) { + hash = (37 * hash) + MANIFEST_FIELD_NUMBER; + hash = (53 * hash) + getManifest().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static 
Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getManifestFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (manifestBuilder_ == null) { + manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); + } else { + manifestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDescriptor(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto build() { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto buildPartial() { + 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (manifestBuilder_ == null) { + result.manifest_ = manifest_; + } else { + result.manifest_ = manifestBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) { + return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto other) { + if (other == org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this; + if (other.hasManifest()) { + mergeManifest(other.getManifest()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasManifest()) { + + return false; + } + if (!getManifest().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(); + if (hasManifest()) { + subBuilder.mergeFrom(getManifest()); + } + input.readMessage(subBuilder, extensionRegistry); + setManifest(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .RemoteEditLogManifestProto manifest = 1; + private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_; + public boolean hasManifest() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() { + if (manifestBuilder_ == null) { + return manifest_; + } else { + return manifestBuilder_.getMessage(); + } + } + public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto 
value) { + if (manifestBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + manifest_ = value; + onChanged(); + } else { + manifestBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setManifest( + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) { + if (manifestBuilder_ == null) { + manifest_ = builderForValue.build(); + onChanged(); + } else { + manifestBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) { + if (manifestBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) { + manifest_ = + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial(); + } else { + manifest_ = value; + } + onChanged(); + } else { + manifestBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearManifest() { + if (manifestBuilder_ == null) { + manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance(); + onChanged(); + } else { + manifestBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getManifestFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() { + if (manifestBuilder_ != null) { + return manifestBuilder_.getMessageOrBuilder(); + } else { + return manifest_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> + getManifestFieldBuilder() { + if (manifestBuilder_ == null) { + manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>( + manifest_, + getParentForChildren(), + isClean()); + manifest_ = null; + } + return manifestBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetEditLogManifestResponseProto) + } + + static { + defaultInstance = new GetEditLogManifestResponseProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetEditLogManifestResponseProto) + } + + public static abstract class NamenodeProtocolService + implements com.google.protobuf.Service { + protected NamenodeProtocolService() {} + + public interface Interface { + public abstract void getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getBlockKeys( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void rollEditLog( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void errorReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new NamenodeProtocolService() { + @java.lang.Override + public void getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getBlocks(controller, request, done); + } + + @java.lang.Override + public void getBlockKeys( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getBlockKeys(controller, request, done); + } + + @java.lang.Override + public void getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getTransationId(controller, request, done); + } + + @java.lang.Override + public void rollEditLog( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.rollEditLog(controller, request, done); + } + + @java.lang.Override + public void errorReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.errorReport(controller, request, done); + } + + @java.lang.Override + public void register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, + com.google.protobuf.RpcCallback done) { + 
impl.register(controller, request, done); + } + + @java.lang.Override + public void startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.startCheckpoint(controller, request, done); + } + + @java.lang.Override + public void endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.endCheckpoint(controller, request, done); + } + + @java.lang.Override + public void getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, + com.google.protobuf.RpcCallback done) { + impl.getEditLogManifest(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request); + case 1: + return impl.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request); + case 2: + return impl.getTransationId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request); + case 3: + return impl.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request); + case 4: + return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request); + case 5: + return impl.register(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request); + case 6: + return impl.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request); + case 7: + return impl.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request); + case 8: + return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); + case 6: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); + case 8: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); + case 6: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); + case 8: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getBlockKeys( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void rollEditLog( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void errorReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, + com.google.protobuf.RpcCallback done); + + public abstract void getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.getBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getBlockKeys(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getTransationId(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.rollEditLog(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.register(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.startCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: + 
this.endCheckpoint(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 8: + this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.getDefaultInstance(); + case 6: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.getDefaultInstance(); + case 8: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(); + case 1: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(); + case 2: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(); + case 3: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(); + case 4: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(); + case 5: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(); + case 6: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(); + case 7: + return org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(); + case 8: + return 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance())); + } + + public void getBlockKeys( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance())); + } + + public void getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance())); + } + + public void rollEditLog( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance())); + } + + public void errorReport( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance())); + } + + public void register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance())); + } + + public void startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance())); + } + + public void endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance())); + } + + public void getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface 
BlockingInterface { + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto getBlocks( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto getBlockKeys( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto getTransationId( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto rollEditLog( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto errorReport( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto register( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto startCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto request) + throws com.google.protobuf.ServiceException { + return 
(org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto endCheckpoint( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.getDefaultInstance()); + } + + + public org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetBlocksRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetBlocksRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetBlocksResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetBlocksResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetBlockKeysRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetBlockKeysRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetBlockKeysResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetBlockKeysResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTransactionIdRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTransactionIdRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTransactionIdResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTransactionIdResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RollEditLogRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RollEditLogRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RollEditLogResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_RollEditLogResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ErrorReportRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ErrorReportRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ErrorReportResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ErrorReportResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegisterRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegisterRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegisterResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegisterResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StartCheckpointRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StartCheckpointRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StartCheckpointResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StartCheckpointResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EndCheckpointRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EndCheckpointRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EndCheckpointResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EndCheckpointResponseProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetEditLogManifestRequestProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetEditLogManifestRequestProto_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetEditLogManifestResponseProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetEditLogManifestResponseProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\026NamenodeProtocol.proto\032\nhdfs.proto\"I\n\025" + + "GetBlocksRequestProto\022\"\n\010datanode\030\001 \002(\0132" + + "\020.DatanodeIDProto\022\014\n\004size\030\002 \002(\004\"B\n\026GetBl" + + "ocksResponseProto\022(\n\006blocks\030\001 \002(\0132\030.Bloc" + + "kWithLocationsProto\"\032\n\030GetBlockKeysReque" + + "stProto\"B\n\031GetBlockKeysResponseProto\022%\n\004" + + "keys\030\001 \002(\0132\027.ExportedBlockKeysProto\"\036\n\034G" + + "etTransactionIdRequestProto\"-\n\035GetTransa" + + "ctionIdResponseProto\022\014\n\004txId\030\001 \002(\004\"\031\n\027Ro" + + "llEditLogRequestProto\"H\n\030RollEditLogResp", + "onseProto\022,\n\tsignature\030\001 
\002(\0132\031.Checkpoin" + + "tSignatureProto\"k\n\027ErrorReportRequestPro" + + "to\0220\n\014registartion\030\001 \002(\0132\032.NamenodeRegis" + + "trationProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003msg\030\003" + + " \002(\t\"\032\n\030ErrorReportResponseProto\"H\n\024Regi" + + "sterRequestProto\0220\n\014registration\030\001 \002(\0132\032" + + ".NamenodeRegistrationProto\"I\n\025RegisterRe" + + "sponseProto\0220\n\014registration\030\001 \002(\0132\032.Name" + + "nodeRegistrationProto\"O\n\033StartCheckpoint" + + "RequestProto\0220\n\014registration\030\001 \002(\0132\032.Nam", + "enodeRegistrationProto\"F\n\034StartCheckpoin" + + "tResponseProto\022&\n\007command\030\001 \002(\0132\025.Nameno" + + "deCommandProto\"{\n\031EndCheckpointRequestPr" + + "oto\0220\n\014registration\030\001 \002(\0132\032.NamenodeRegi" + + "strationProto\022,\n\tsignature\030\002 \002(\0132\031.Check" + + "pointSignatureProto\"\034\n\032EndCheckpointResp" + + "onseProto\"3\n\036GetEditLogManifestRequestPr" + + "oto\022\021\n\tsinceTxId\030\001 \002(\004\"P\n\037GetEditLogMani" + + "festResponseProto\022-\n\010manifest\030\001 \002(\0132\033.Re" + + "moteEditLogManifestProto2\246\005\n\027NamenodePro", + "tocolService\022<\n\tgetBlocks\022\026.GetBlocksReq" + + "uestProto\032\027.GetBlocksResponseProto\022E\n\014ge" + + "tBlockKeys\022\031.GetBlockKeysRequestProto\032\032." + + "GetBlockKeysResponseProto\022P\n\017getTransati" + + "onId\022\035.GetTransactionIdRequestProto\032\036.Ge" + + "tTransactionIdResponseProto\022B\n\013rollEditL" + + "og\022\030.RollEditLogRequestProto\032\031.RollEditL" + + "ogResponseProto\022B\n\013errorReport\022\030.ErrorRe" + + "portRequestProto\032\031.ErrorReportResponsePr" + + "oto\0229\n\010register\022\025.RegisterRequestProto\032\026", + ".RegisterResponseProto\022N\n\017startCheckpoin" + + "t\022\034.StartCheckpointRequestProto\032\035.StartC" + + "heckpointResponseProto\022H\n\rendCheckpoint\022" + + "\032.EndCheckpointRequestProto\032\033.EndCheckpo" + + "intResponseProto\022W\n\022getEditLogManifest\022\037" + + ".GetEditLogManifestRequestProto\032 .GetEdi" + + "tLogManifestResponseProtoBE\n%org.apache." 
+ + "hadoop.hdfs.protocol.protoB\026NamenodeProt" + + "ocolProtos\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_GetBlocksRequestProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_GetBlocksRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetBlocksRequestProto_descriptor, + new java.lang.String[] { "Datanode", "Size", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto.Builder.class); + internal_static_GetBlocksResponseProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_GetBlocksResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetBlocksResponseProto_descriptor, + new java.lang.String[] { "Blocks", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto.Builder.class); + internal_static_GetBlockKeysRequestProto_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_GetBlockKeysRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetBlockKeysRequestProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto.Builder.class); + internal_static_GetBlockKeysResponseProto_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_GetBlockKeysResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetBlockKeysResponseProto_descriptor, + new java.lang.String[] { "Keys", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto.Builder.class); + internal_static_GetTransactionIdRequestProto_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_GetTransactionIdRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTransactionIdRequestProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto.Builder.class); + internal_static_GetTransactionIdResponseProto_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_GetTransactionIdResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTransactionIdResponseProto_descriptor, + new java.lang.String[] { "TxId", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto.Builder.class); + 
internal_static_RollEditLogRequestProto_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_RollEditLogRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RollEditLogRequestProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto.Builder.class); + internal_static_RollEditLogResponseProto_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_RollEditLogResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RollEditLogResponseProto_descriptor, + new java.lang.String[] { "Signature", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto.Builder.class); + internal_static_ErrorReportRequestProto_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_ErrorReportRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ErrorReportRequestProto_descriptor, + new java.lang.String[] { "Registartion", "ErrorCode", "Msg", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto.Builder.class); + internal_static_ErrorReportResponseProto_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_ErrorReportResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ErrorReportResponseProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto.Builder.class); + internal_static_RegisterRequestProto_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_RegisterRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterRequestProto_descriptor, + new java.lang.String[] { "Registration", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto.Builder.class); + internal_static_RegisterResponseProto_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_RegisterResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterResponseProto_descriptor, + new java.lang.String[] { "Registration", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto.Builder.class); + internal_static_StartCheckpointRequestProto_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_StartCheckpointRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StartCheckpointRequestProto_descriptor, + new java.lang.String[] { "Registration", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.class, + 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto.Builder.class); + internal_static_StartCheckpointResponseProto_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_StartCheckpointResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StartCheckpointResponseProto_descriptor, + new java.lang.String[] { "Command", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto.Builder.class); + internal_static_EndCheckpointRequestProto_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_EndCheckpointRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EndCheckpointRequestProto_descriptor, + new java.lang.String[] { "Registration", "Signature", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto.Builder.class); + internal_static_EndCheckpointResponseProto_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_EndCheckpointResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EndCheckpointResponseProto_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto.Builder.class); + internal_static_GetEditLogManifestRequestProto_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_GetEditLogManifestRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetEditLogManifestRequestProto_descriptor, + new java.lang.String[] { "SinceTxId", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto.Builder.class); + internal_static_GetEditLogManifestResponseProto_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_GetEditLogManifestResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetEditLogManifestResponseProto_descriptor, + new java.lang.String[] { "Manifest", }, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.class, + org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolServerSideTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolServerSideTranslatorR23.java index 05297bad22..e13f7db3c2 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolServerSideTranslatorR23.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolServerSideTranslatorR23.java @@ -21,9 +21,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.security.token.Token; /** * This class is used on the server side. @@ -116,4 +120,10 @@ public void refreshNamenodes() throws IOException { public void deleteBlockPool(String bpid, boolean force) throws IOException { server.deleteBlockPool(bpid, force); } + + @Override + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, + Token token) throws IOException { + return server.getBlockLocalPathInfo(block, token); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolTranslatorR23.java index 9e384dd95c..9912f81f82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolTranslatorR23.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeProtocolTranslatorR23.java @@ -26,14 +26,17 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; /** @@ -63,6 +66,23 @@ public ClientDatanodeProtocolTranslatorR23(InetSocketAddress addr, rpcProxy = createClientDatanodeProtocolProxy(addr, ticket, conf, factory); } + /** + * Constructor. + * @param datanodeid Datanode to connect to. + * @param conf Configuration. + * @param socketTimeout Socket timeout to use. 
+ * @throws IOException + */ + public ClientDatanodeProtocolTranslatorR23(DatanodeID datanodeid, + Configuration conf, int socketTimeout) throws IOException { + InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost() + + ":" + datanodeid.getIpcPort()); + rpcProxy = RPC.getProxy(ClientDatanodeWireProtocol.class, + ClientDatanodeWireProtocol.versionID, addr, + UserGroupInformation.getCurrentUser(), conf, + NetUtils.getDefaultSocketFactory(conf), socketTimeout); + } + static ClientDatanodeWireProtocol createClientDatanodeProtocolProxy( DatanodeID datanodeid, Configuration conf, int socketTimeout, LocatedBlock locatedBlock) @@ -134,4 +154,9 @@ public void deleteBlockPool(String bpid, boolean force) throws IOException { } + @Override + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, + Token token) throws IOException { + return rpcProxy.getBlockLocalPathInfo(block, token); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeWireProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeWireProtocol.java index 551d21007c..819e9c6109 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeWireProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolR23Compatible/ClientDatanodeWireProtocol.java @@ -24,11 +24,15 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.ipc.VersionedProtocol; import org.apache.hadoop.security.KerberosInfo; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; /** @@ -77,6 +81,13 @@ public interface ClientDatanodeWireProtocol extends VersionedProtocol { */ void deleteBlockPool(String bpid, boolean force) throws IOException; + /** + * The specification of this method matches that of + * {@link org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#getBlockLocalPathInfo(ExtendedBlock, Token)} + */ + BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, + Token token) throws IOException; + /** * This method is defined to get the protocol signature using * the R23 protocol - hence we have added the suffix of 2 to the method name diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index d29450d6c3..a0146e75a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -39,7 +39,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 64f76e39ba..c6675a2788 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -29,8 +29,6 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.TreeSet; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 403fbabaa3..d927f05297 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -20,14 +20,10 @@ import java.io.DataInput; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Queue; -import java.util.Set; -import java.util.TreeSet; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.DeprecatedUTF8; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 6181588475..2c6b46f050 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -20,8 +20,6 @@ import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collection; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java index d35db96ec0..787dd2adca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java @@ -20,9 +20,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.NavigableSet; -import java.util.TreeSet; - import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java new file mode 100644 index 0000000000..4d098ebec2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -0,0 +1,782 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import static org.apache.hadoop.hdfs.server.common.Util.now; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.util.Collection; +import java.util.LinkedList; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; +import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; +import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; +import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.StringUtils; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +/** + * A thread per namenode to perform: + *
+ * <ul>
+ *   <li> Pre-registration handshake with namenode</li>
+ *   <li> Registration with namenode</li>
+ *   <li> Send periodic heartbeats to the namenode</li>
+ *   <li> Handle commands received from the namenode</li>
+ * </ul>
+ */ +@InterfaceAudience.Private +class BPOfferService implements Runnable { + static final Log LOG = DataNode.LOG; + + final InetSocketAddress nnAddr; + + /** + * Information about the namespace that this service + * is registering with. This is assigned after + * the first phase of the handshake. + */ + NamespaceInfo bpNSInfo; + + /** + * The registration information for this block pool. + * This is assigned after the second phase of the + * handshake. + */ + DatanodeRegistration bpRegistration; + + long lastBlockReport = 0; + long lastDeletedReport = 0; + + boolean resetBlockReportTime = true; + + Thread bpThread; + DatanodeProtocol bpNamenode; + private long lastHeartbeat = 0; + private volatile boolean initialized = false; + private final LinkedList receivedAndDeletedBlockList + = new LinkedList(); + private volatile int pendingReceivedRequests = 0; + private volatile boolean shouldServiceRun = true; + UpgradeManagerDatanode upgradeManager = null; + private final DataNode dn; + private final DNConf dnConf; + + BPOfferService(InetSocketAddress nnAddr, DataNode dn) { + this.dn = dn; + this.nnAddr = nnAddr; + this.dnConf = dn.getDnConf(); + } + + /** + * returns true if BP thread has completed initialization of storage + * and has registered with the corresponding namenode + * @return true if initialized + */ + public boolean isInitialized() { + return initialized; + } + + public boolean isAlive() { + return shouldServiceRun && bpThread.isAlive(); + } + + public String getBlockPoolId() { + if (bpNSInfo != null) { + return bpNSInfo.getBlockPoolID(); + } else { + LOG.warn("Block pool ID needed, but service not yet registered with NN", + new Exception("trace")); + return null; + } + } + + public NamespaceInfo getNamespaceInfo() { + return bpNSInfo; + } + + public String toString() { + if (bpNSInfo == null) { + // If we haven't yet connected to our NN, we don't yet know our + // own block pool ID. + // If _none_ of the block pools have connected yet, we don't even + // know the storage ID of this DN. + String storageId = dn.getStorageId(); + if (storageId == null || "".equals(storageId)) { + storageId = "unknown"; + } + return "Block pool (storage id " + storageId + + ") connecting to " + nnAddr; + } else { + return "Block pool " + getBlockPoolId() + + " (storage id " + dn.getStorageId() + + ") registered with " + nnAddr; + } + } + + InetSocketAddress getNNSocketAddress() { + return nnAddr; + } + + /** + * Used to inject a spy NN in the unit tests. + */ + @VisibleForTesting + void setNameNode(DatanodeProtocol dnProtocol) { + bpNamenode = dnProtocol; + } + + /** + * Perform the first part of the handshake with the NameNode. + * This calls versionRequest to determine the NN's + * namespace and version info. It automatically retries until + * the NN responds or the DN is shutting down. 
+ * + * @return the NamespaceInfo + * @throws IncorrectVersionException if the remote NN does not match + * this DN's version + */ + NamespaceInfo retrieveNamespaceInfo() throws IncorrectVersionException { + NamespaceInfo nsInfo = null; + while (shouldRun()) { + try { + nsInfo = bpNamenode.versionRequest(); + LOG.debug(this + " received versionRequest response: " + nsInfo); + break; + } catch(SocketTimeoutException e) { // namenode is busy + LOG.warn("Problem connecting to server: " + nnAddr); + } catch(IOException e ) { // namenode is not available + LOG.warn("Problem connecting to server: " + nnAddr); + } + + // try again in a second + sleepAndLogInterrupts(5000, "requesting version info from NN"); + } + + if (nsInfo != null) { + checkNNVersion(nsInfo); + } + return nsInfo; + } + + private void checkNNVersion(NamespaceInfo nsInfo) + throws IncorrectVersionException { + // build and layout versions should match + String nsBuildVer = nsInfo.getBuildVersion(); + String stBuildVer = Storage.getBuildVersion(); + if (!nsBuildVer.equals(stBuildVer)) { + LOG.warn("Data-node and name-node Build versions must be the same. " + + "Namenode build version: " + nsBuildVer + "Datanode " + + "build version: " + stBuildVer); + throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); + } + + if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) { + LOG.warn("Data-node and name-node layout versions must be the same." + + " Expected: "+ HdfsConstants.LAYOUT_VERSION + + " actual "+ bpNSInfo.getLayoutVersion()); + throw new IncorrectVersionException( + bpNSInfo.getLayoutVersion(), "namenode"); + } + } + + private void connectToNNAndHandshake() throws IOException { + // get NN proxy + bpNamenode = (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class, + DatanodeProtocol.versionID, nnAddr, dn.getConf()); + + // First phase of the handshake with NN - get the namespace + // info. + bpNSInfo = retrieveNamespaceInfo(); + + // Now that we know the namespace ID, etc, we can pass this to the DN. + // The DN can now initialize its local storage if we are the + // first BP to handshake, etc. + dn.initBlockPool(this); + + // Second phase of the handshake with the NN. + register(); + } + + /** + * This methods arranges for the data node to send the block report at + * the next heartbeat. + */ + void scheduleBlockReport(long delay) { + if (delay > 0) { // send BR after random delay + lastBlockReport = System.currentTimeMillis() + - ( dnConf.blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay))); + } else { // send at next heartbeat + lastBlockReport = lastHeartbeat - dnConf.blockReportInterval; + } + resetBlockReportTime = true; // reset future BRs for randomness + } + + void reportBadBlocks(ExtendedBlock block) { + DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) }; + LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; + + try { + bpNamenode.reportBadBlocks(blocks); + } catch (IOException e){ + /* One common reason is that NameNode could be in safe mode. + * Should we keep on retrying in that case? 
+ */ + LOG.warn("Failed to report bad block " + block + " to namenode : " + + " Exception", e); + } + + } + + /** + * Report received blocks and delete hints to the Namenode + * + * @throws IOException + */ + private void reportReceivedDeletedBlocks() throws IOException { + + // check if there are newly received blocks + ReceivedDeletedBlockInfo[] receivedAndDeletedBlockArray = null; + int currentReceivedRequestsCounter; + synchronized (receivedAndDeletedBlockList) { + currentReceivedRequestsCounter = pendingReceivedRequests; + int numBlocks = receivedAndDeletedBlockList.size(); + if (numBlocks > 0) { + // + // Send newly-received and deleted blockids to namenode + // + receivedAndDeletedBlockArray = receivedAndDeletedBlockList + .toArray(new ReceivedDeletedBlockInfo[numBlocks]); + } + } + if (receivedAndDeletedBlockArray != null) { + bpNamenode.blockReceivedAndDeleted(bpRegistration, getBlockPoolId(), + receivedAndDeletedBlockArray); + synchronized (receivedAndDeletedBlockList) { + for (int i = 0; i < receivedAndDeletedBlockArray.length; i++) { + receivedAndDeletedBlockList.remove(receivedAndDeletedBlockArray[i]); + } + pendingReceivedRequests -= currentReceivedRequestsCounter; + } + } + } + + /* + * Informing the name node could take a long long time! Should we wait + * till namenode is informed before responding with success to the + * client? For now we don't. + */ + void notifyNamenodeReceivedBlock(ExtendedBlock block, String delHint) { + if (block == null || delHint == null) { + throw new IllegalArgumentException(block == null ? "Block is null" + : "delHint is null"); + } + + if (!block.getBlockPoolId().equals(getBlockPoolId())) { + LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. " + + getBlockPoolId()); + return; + } + + synchronized (receivedAndDeletedBlockList) { + receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block + .getLocalBlock(), delHint)); + pendingReceivedRequests++; + receivedAndDeletedBlockList.notifyAll(); + } + } + + void notifyNamenodeDeletedBlock(ExtendedBlock block) { + if (block == null) { + throw new IllegalArgumentException("Block is null"); + } + + if (!block.getBlockPoolId().equals(getBlockPoolId())) { + LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. " + + getBlockPoolId()); + return; + } + + synchronized (receivedAndDeletedBlockList) { + receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block + .getLocalBlock(), ReceivedDeletedBlockInfo.TODELETE_HINT)); + } + } + + + /** + * Report the list blocks to the Namenode + * @throws IOException + */ + DatanodeCommand blockReport() throws IOException { + // send block report if timer has expired. 
+ DatanodeCommand cmd = null; + long startTime = now(); + if (startTime - lastBlockReport > dnConf.blockReportInterval) { + + // Create block report + long brCreateStartTime = now(); + BlockListAsLongs bReport = dn.data.getBlockReport(getBlockPoolId()); + + // Send block report + long brSendStartTime = now(); + cmd = bpNamenode.blockReport(bpRegistration, getBlockPoolId(), bReport + .getBlockListAsLongs()); + + // Log the block report processing stats from Datanode perspective + long brSendCost = now() - brSendStartTime; + long brCreateCost = brSendStartTime - brCreateStartTime; + dn.metrics.addBlockReport(brSendCost); + LOG.info("BlockReport of " + bReport.getNumberOfBlocks() + + " blocks took " + brCreateCost + " msec to generate and " + + brSendCost + " msecs for RPC and NN processing"); + + // If we have sent the first block report, then wait a random + // time before we start the periodic block reports. + if (resetBlockReportTime) { + lastBlockReport = startTime - DFSUtil.getRandom().nextInt((int)(dnConf.blockReportInterval)); + resetBlockReportTime = false; + } else { + /* say the last block report was at 8:20:14. The current report + * should have started around 9:20:14 (default 1 hour interval). + * If current time is : + * 1) normal like 9:20:18, next report should be at 10:20:14 + * 2) unexpected like 11:35:43, next report should be at 12:20:14 + */ + lastBlockReport += (now() - lastBlockReport) / + dnConf.blockReportInterval * dnConf.blockReportInterval; + } + LOG.info("sent block report, processed command:" + cmd); + } + return cmd; + } + + + DatanodeCommand [] sendHeartBeat() throws IOException { + return bpNamenode.sendHeartbeat(bpRegistration, + dn.data.getCapacity(), + dn.data.getDfsUsed(), + dn.data.getRemaining(), + dn.data.getBlockPoolUsed(getBlockPoolId()), + dn.xmitsInProgress.get(), + dn.getXceiverCount(), dn.data.getNumFailedVolumes()); + } + + //This must be called only by blockPoolManager + void start() { + if ((bpThread != null) && (bpThread.isAlive())) { + //Thread is started already + return; + } + bpThread = new Thread(this, formatThreadName()); + bpThread.setDaemon(true); // needed for JUnit testing + bpThread.start(); + } + + private String formatThreadName() { + Collection dataDirs = DataNode.getStorageDirs(dn.getConf()); + return "DataNode: [" + + StringUtils.uriToString(dataDirs.toArray(new URI[0])) + "] " + + " heartbeating to " + nnAddr; + } + + //This must be called only by blockPoolManager. + void stop() { + shouldServiceRun = false; + if (bpThread != null) { + bpThread.interrupt(); + } + } + + //This must be called only by blockPoolManager + void join() { + try { + if (bpThread != null) { + bpThread.join(); + } + } catch (InterruptedException ie) { } + } + + //Cleanup method to be called by current thread before exiting. + private synchronized void cleanUp() { + + if(upgradeManager != null) + upgradeManager.shutdownUpgrade(); + shouldServiceRun = false; + RPC.stopProxy(bpNamenode); + dn.shutdownBlockPool(this); + } + + /** + * Main loop for each BP thread. Run until shutdown, + * forever calling remote NameNode functions. + */ + private void offerService() throws Exception { + LOG.info("For namenode " + nnAddr + " using DELETEREPORT_INTERVAL of " + + dnConf.deleteReportInterval + " msec " + " BLOCKREPORT_INTERVAL of " + + dnConf.blockReportInterval + "msec" + " Initial delay: " + + dnConf.initialBlockReportDelay + "msec" + "; heartBeatInterval=" + + dnConf.heartBeatInterval); + + // + // Now loop for a long time.... 
+ // + while (shouldRun()) { + try { + long startTime = now(); + + // + // Every so often, send heartbeat or block-report + // + if (startTime - lastHeartbeat > dnConf.heartBeatInterval) { + // + // All heartbeat messages include following info: + // -- Datanode name + // -- data transfer port + // -- Total capacity + // -- Bytes remaining + // + lastHeartbeat = startTime; + if (!dn.areHeartbeatsDisabledForTests()) { + DatanodeCommand[] cmds = sendHeartBeat(); + dn.metrics.addHeartbeat(now() - startTime); + + long startProcessCommands = now(); + if (!processCommand(cmds)) + continue; + long endProcessCommands = now(); + if (endProcessCommands - startProcessCommands > 2000) { + LOG.info("Took " + (endProcessCommands - startProcessCommands) + + "ms to process " + cmds.length + " commands from NN"); + } + } + } + if (pendingReceivedRequests > 0 + || (startTime - lastDeletedReport > dnConf.deleteReportInterval)) { + reportReceivedDeletedBlocks(); + lastDeletedReport = startTime; + } + + DatanodeCommand cmd = blockReport(); + processCommand(cmd); + + // Now safe to start scanning the block pool + if (dn.blockScanner != null) { + dn.blockScanner.addBlockPool(this.getBlockPoolId()); + } + + // + // There is no work to do; sleep until hearbeat timer elapses, + // or work arrives, and then iterate again. + // + long waitTime = dnConf.heartBeatInterval - + (System.currentTimeMillis() - lastHeartbeat); + synchronized(receivedAndDeletedBlockList) { + if (waitTime > 0 && pendingReceivedRequests == 0) { + try { + receivedAndDeletedBlockList.wait(waitTime); + } catch (InterruptedException ie) { + LOG.warn("BPOfferService for " + this + " interrupted"); + } + } + } // synchronized + } catch(RemoteException re) { + String reClass = re.getClassName(); + if (UnregisteredNodeException.class.getName().equals(reClass) || + DisallowedDatanodeException.class.getName().equals(reClass) || + IncorrectVersionException.class.getName().equals(reClass)) { + LOG.warn(this + " is shutting down", re); + shouldServiceRun = false; + return; + } + LOG.warn("RemoteException in offerService", re); + try { + long sleepTime = Math.min(1000, dnConf.heartBeatInterval); + Thread.sleep(sleepTime); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } catch (IOException e) { + LOG.warn("IOException in offerService", e); + } + } // while (shouldRun()) + } // offerService + + /** + * Register one bp with the corresponding NameNode + *
+ * <p>
+ * The bpDatanode needs to register with the namenode on startup in order + * 1) to report which storage it is serving now and + * 2) to receive a registrationID + * + * issued by the namenode to recognize registered datanodes. + * + * @see FSNamesystem#registerDatanode(DatanodeRegistration) + * @throws IOException + */ + void register() throws IOException { + Preconditions.checkState(bpNSInfo != null, + "register() should be called after handshake()"); + + // The handshake() phase loaded the block pool storage + // off disk - so update the bpRegistration object from that info + bpRegistration = dn.createBPRegistration(bpNSInfo); + + LOG.info(this + " beginning handshake with NN"); + + while (shouldRun()) { + try { + // Use returned registration from namenode with updated machine name. + bpRegistration = bpNamenode.registerDatanode(bpRegistration); + break; + } catch(SocketTimeoutException e) { // namenode is busy + LOG.info("Problem connecting to server: " + nnAddr); + sleepAndLogInterrupts(1000, "connecting to server"); + } + } + + LOG.info("Block pool " + this + " successfully registered with NN"); + dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId()); + + // random short delay - helps scatter the BR from all DNs + scheduleBlockReport(dnConf.initialBlockReportDelay); + } + + + private void sleepAndLogInterrupts(int millis, + String stateString) { + try { + Thread.sleep(millis); + } catch (InterruptedException ie) { + LOG.info("BPOfferService " + this + + " interrupted while " + stateString); + } + } + + /** + * No matter what kind of exception we get, keep retrying to offerService(). + * That's the loop that connects to the NameNode and provides basic DataNode + * functionality. + * + * Only stop when "shouldRun" or "shouldServiceRun" is turned off, which can + * happen either at shutdown or due to refreshNamenodes. + */ + @Override + public void run() { + LOG.info(this + " starting to offer service"); + + try { + // init stuff + try { + // setup storage + connectToNNAndHandshake(); + } catch (IOException ioe) { + // Initial handshake, storage recovery or registration failed + // End BPOfferService thread + LOG.fatal("Initialization failed for block pool " + this, ioe); + return; + } + + initialized = true; // bp is initialized; + + while (shouldRun()) { + try { + startDistributedUpgradeIfNeeded(); + offerService(); + } catch (Exception ex) { + LOG.error("Exception in BPOfferService for " + this, ex); + sleepAndLogInterrupts(5000, "offering service"); + } + } + } catch (Throwable ex) { + LOG.warn("Unexpected exception in block pool " + this, ex); + } finally { + LOG.warn("Ending block pool service for: " + this); + cleanUp(); + } + } + + private boolean shouldRun() { + return shouldServiceRun && dn.shouldRun(); + } + + /** + * Process an array of datanode commands + * + * @param cmds an array of datanode commands + * @return true if further processing may be required or false otherwise. + */ + private boolean processCommand(DatanodeCommand[] cmds) { + if (cmds != null) { + for (DatanodeCommand cmd : cmds) { + try { + if (processCommand(cmd) == false) { + return false; + } + } catch (IOException ioe) { + LOG.warn("Error processing datanode Command", ioe); + } + } + } + return true; + } + + /** + * + * @param cmd + * @return true if further processing may be required or false otherwise. 
+ * @throws IOException + */ + private boolean processCommand(DatanodeCommand cmd) throws IOException { + if (cmd == null) + return true; + final BlockCommand bcmd = + cmd instanceof BlockCommand? (BlockCommand)cmd: null; + + switch(cmd.getAction()) { + case DatanodeProtocol.DNA_TRANSFER: + // Send a copy of a block to another datanode + dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets()); + dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length); + break; + case DatanodeProtocol.DNA_INVALIDATE: + // + // Some local block(s) are obsolete and can be + // safely garbage-collected. + // + Block toDelete[] = bcmd.getBlocks(); + try { + if (dn.blockScanner != null) { + dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete); + } + // using global fsdataset + dn.data.invalidate(bcmd.getBlockPoolId(), toDelete); + } catch(IOException e) { + dn.checkDiskError(); + throw e; + } + dn.metrics.incrBlocksRemoved(toDelete.length); + break; + case DatanodeProtocol.DNA_SHUTDOWN: + // shut down the data node + shouldServiceRun = false; + return false; + case DatanodeProtocol.DNA_REGISTER: + // namenode requested a registration - at start or if NN lost contact + LOG.info("DatanodeCommand action: DNA_REGISTER"); + if (shouldRun()) { + // re-retrieve namespace info to make sure that, if the NN + // was restarted, we still match its version (HDFS-2120) + retrieveNamespaceInfo(); + // and re-register + register(); + } + break; + case DatanodeProtocol.DNA_FINALIZE: + String bp = ((FinalizeCommand) cmd).getBlockPoolId(); + assert getBlockPoolId().equals(bp) : + "BP " + getBlockPoolId() + " received DNA_FINALIZE " + + "for other block pool " + bp; + + dn.finalizeUpgradeForPool(bp); + break; + case UpgradeCommand.UC_ACTION_START_UPGRADE: + // start distributed upgrade here + processDistributedUpgradeCommand((UpgradeCommand)cmd); + break; + case DatanodeProtocol.DNA_RECOVERBLOCK: + dn.recoverBlocks(((BlockRecoveryCommand)cmd).getRecoveringBlocks()); + break; + case DatanodeProtocol.DNA_ACCESSKEYUPDATE: + LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE"); + if (dn.isBlockTokenEnabled) { + dn.blockPoolTokenSecretManager.setKeys(getBlockPoolId(), + ((KeyUpdateCommand) cmd).getExportedKeys()); + } + break; + case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE: + LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE"); + long bandwidth = + ((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue(); + if (bandwidth > 0) { + DataXceiverServer dxcs = + (DataXceiverServer) dn.dataXceiverServer.getRunnable(); + dxcs.balanceThrottler.setBandwidth(bandwidth); + } + break; + default: + LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction()); + } + return true; + } + + private void processDistributedUpgradeCommand(UpgradeCommand comm) + throws IOException { + UpgradeManagerDatanode upgradeManager = getUpgradeManager(); + upgradeManager.processUpgradeCommand(comm); + } + + synchronized UpgradeManagerDatanode getUpgradeManager() { + if(upgradeManager == null) + upgradeManager = + new UpgradeManagerDatanode(dn, getBlockPoolId()); + + return upgradeManager; + } + + /** + * Start distributed upgrade if it should be initiated by the data-node. 
+ */ + private void startDistributedUpgradeIfNeeded() throws IOException { + UpgradeManagerDatanode um = getUpgradeManager(); + + if(!um.getUpgradeState()) + return; + um.setUpgradeState(false, um.getUpgradeVersion()); + um.startUpgrade(); + return; + } + +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java index ad5c6d878a..fd8aec7bac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java @@ -26,9 +26,11 @@ import java.io.IOException; import java.io.RandomAccessFile; -import org.apache.commons.httpclient.methods.GetMethod; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + /** @@ -36,7 +38,9 @@ * This is not related to the Block related functionality in Namenode. * The biggest part of data block metadata is CRC for the block. */ -class BlockMetadataHeader { +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BlockMetadataHeader { static final short METADATA_VERSION = FSDataset.METADATA_VERSION; @@ -52,12 +56,14 @@ class BlockMetadataHeader { this.checksum = checksum; this.version = version; } - - short getVersion() { + + /** Get the version */ + public short getVersion() { return version; } - DataChecksum getChecksum() { + /** Get the checksum */ + public DataChecksum getChecksum() { return checksum; } @@ -68,7 +74,7 @@ DataChecksum getChecksum() { * @return Metadata Header * @throws IOException */ - static BlockMetadataHeader readHeader(DataInputStream in) throws IOException { + public static BlockMetadataHeader readHeader(DataInputStream in) throws IOException { return readHeader(in.readShort(), in); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java index 579eb8ed1a..a666149beb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java @@ -34,7 +34,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Random; import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -252,8 +251,9 @@ private synchronized long getNewBlockScanTime() { */ long period = Math.min(scanPeriod, Math.max(blockMap.size(),1) * 600 * 1000L); + int periodInt = Math.abs((int)period); return System.currentTimeMillis() - scanPeriod + - DFSUtil.getRandom().nextInt((int)period); + DFSUtil.getRandom().nextInt(periodInt); } /** Adds block to list of blocks */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 4b961522d6..61bc29acf4 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -50,7 +50,6 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.util.PureJavaCrc32; /** A class that receives a block and writes to its own disk, meanwhile * may copies it to another site. If a throttler is provided, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java index 535619e096..e14aaf63c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java @@ -31,7 +31,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index df8a2adcca..65ccba80dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -37,6 +37,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY; @@ -48,11 +49,12 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT; -import static org.apache.hadoop.hdfs.server.common.Util.now; import java.io.BufferedOutputStream; +import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; @@ -61,7 +63,6 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; -import java.net.SocketTimeoutException; import java.net.URI; import java.net.UnknownHostException; import java.nio.channels.ServerSocketChannel; @@ -74,7 +75,6 @@ import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; -import java.util.LinkedList; import java.util.List; import java.util.Map; import 
java.util.Set; @@ -90,11 +90,11 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -103,7 +103,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; -import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; @@ -113,38 +112,28 @@ import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.JspHelper; -import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo; import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets; import org.apache.hadoop.hdfs.server.namenode.StreamFile; -import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; -import org.apache.hadoop.hdfs.server.protocol.BlockCommand; -import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; -import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; -import 
org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolServerSideTranslatorR23; import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolTranslatorR23; import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol; @@ -160,8 +149,10 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -176,6 +167,8 @@ import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; +import com.google.common.base.Preconditions; + /********************************************************** * DataNode is a class (and program) that stores a set of @@ -236,8 +229,7 @@ public class DataNode extends Configured * Use {@link NetUtils#createSocketAddr(String)} instead. */ @Deprecated - public static InetSocketAddress createSocketAddr(String target - ) throws IOException { + public static InetSocketAddress createSocketAddr(String target) { return NetUtils.createSocketAddr(target); } @@ -331,14 +323,14 @@ public Object run() throws Exception { } } - void joinAll() throws InterruptedException { + void joinAll() { for (BPOfferService bpos: this.getAllNamenodeThreads()) { bpos.join(); } } void refreshNamenodes(Configuration conf) - throws IOException, InterruptedException { + throws IOException { LOG.info("Refresh request received for nameservices: " + conf.get(DFS_FEDERATION_NAMESERVICES)); List newAddresses = @@ -396,8 +388,6 @@ void refreshNamenodes(Configuration conf) private volatile String hostName; // Host name of this datanode - private static String dnThreadName; - boolean isBlockTokenEnabled; BlockPoolTokenSecretManager blockPoolTokenSecretManager; @@ -414,6 +404,8 @@ void refreshNamenodes(Configuration conf) private AbstractList dataDirs; private Configuration conf; + private final String userWithLocalPathAccess; + /** * Create the DataNode given a configuration and an array of dataDirs. * 'dataDirs' is where the blocks are stored. 
@@ -432,6 +424,8 @@ void refreshNamenodes(Configuration conf) final SecureResources resources) throws IOException { super(conf); + this.userWithLocalPathAccess = conf + .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY); try { hostName = getHostName(conf); startDataNode(conf, dataDirs, resources); @@ -452,11 +446,8 @@ private synchronized void setClusterId(String cid) throws IOException { private static String getHostName(Configuration config) throws UnknownHostException { - String name = null; // use configured nameserver & interface to get local hostname - if (config.get(DFS_DATANODE_HOST_NAME_KEY) != null) { - name = config.get(DFS_DATANODE_HOST_NAME_KEY); - } + String name = config.get(DFS_DATANODE_HOST_NAME_KEY); if (name == null) { name = DNS .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY, @@ -481,11 +472,11 @@ conf, new AccessControlList(conf.get(DFS_ADMIN, " ")), if(LOG.isDebugEnabled()) { LOG.debug("Datanode listening on " + infoHost + ":" + tmpInfoPort); } - if (conf.getBoolean("dfs.https.enable", false)) { + if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT); InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get( - "dfs.datanode.https.address", infoHost + ":" + 0)); + DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0)); Configuration sslConf = new HdfsConfiguration(false); sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml")); @@ -526,7 +517,7 @@ private void startPlugins(Configuration conf) { private void initIpcServer(Configuration conf) throws IOException { InetSocketAddress ipcAddr = NetUtils.createSocketAddr( - conf.get("dfs.datanode.ipc.address")); + conf.get(DFS_DATANODE_IPC_ADDRESS_KEY)); // Add all the RPC protocols that the Datanode implements ClientDatanodeProtocolServerSideTranslatorR23 @@ -692,679 +683,10 @@ void setHeartbeatsDisabledForTests( this.heartbeatsDisabledForTests = heartbeatsDisabledForTests; } - /** - * A thread per namenode to perform: - *
- * <ul>
- *   <li> Pre-registration handshake with namenode</li>
- *   <li> Registration with namenode</li>
- *   <li> Send periodic heartbeats to the namenode</li>
- *   <li> Handle commands received from the datanode</li>
- * </ul>
- */ - @InterfaceAudience.Private - static class BPOfferService implements Runnable { - final InetSocketAddress nnAddr; - DatanodeRegistration bpRegistration; - NamespaceInfo bpNSInfo; - long lastBlockReport = 0; - long lastDeletedReport = 0; - - boolean resetBlockReportTime = true; - - private Thread bpThread; - private DatanodeProtocol bpNamenode; - private String blockPoolId; - private long lastHeartbeat = 0; - private volatile boolean initialized = false; - private final LinkedList receivedAndDeletedBlockList - = new LinkedList(); - private volatile int pendingReceivedRequests = 0; - private volatile boolean shouldServiceRun = true; - UpgradeManagerDatanode upgradeManager = null; - private final DataNode dn; - private final DNConf dnConf; - - BPOfferService(InetSocketAddress nnAddr, DataNode dn) { - this.dn = dn; - this.bpRegistration = dn.createRegistration(); - this.nnAddr = nnAddr; - this.dnConf = dn.getDnConf(); - } - - /** - * returns true if BP thread has completed initialization of storage - * and has registered with the corresponding namenode - * @return true if initialized - */ - public boolean initialized() { - return initialized; - } - - public boolean isAlive() { - return shouldServiceRun && bpThread.isAlive(); - } - - public String getBlockPoolId() { - return blockPoolId; - } - - private InetSocketAddress getNNSocketAddress() { - return nnAddr; - } - - void setNamespaceInfo(NamespaceInfo nsinfo) { - bpNSInfo = nsinfo; - this.blockPoolId = nsinfo.getBlockPoolID(); - } - - void setNameNode(DatanodeProtocol dnProtocol) { - bpNamenode = dnProtocol; - } - - private NamespaceInfo handshake() throws IOException { - NamespaceInfo nsInfo = new NamespaceInfo(); - while (dn.shouldRun && shouldServiceRun) { - try { - nsInfo = bpNamenode.versionRequest(); - // verify build version - String nsVer = nsInfo.getBuildVersion(); - String stVer = Storage.getBuildVersion(); - LOG.info("handshake: namespace info = " + nsInfo); - - if(! nsVer.equals(stVer)) { - String errorMsg = "Incompatible build versions: bp = " + blockPoolId + - "namenode BV = " + nsVer + "; datanode BV = " + stVer; - LOG.warn(errorMsg); - bpNamenode.errorReport( bpRegistration, - DatanodeProtocol.NOTIFY, errorMsg ); - } else { - break; - } - } catch(SocketTimeoutException e) { // namenode is busy - LOG.warn("Problem connecting to server: " + nnAddr); - } catch(IOException e ) { // namenode is not available - LOG.warn("Problem connecting to server: " + nnAddr); - } - - // try again in a second - try { - Thread.sleep(5000); - } catch (InterruptedException ie) {} - } - - assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() : - "Data-node and name-node layout versions must be the same." 
- + "Expected: "+ HdfsConstants.LAYOUT_VERSION - + " actual "+ nsInfo.getLayoutVersion(); - return nsInfo; - } - - void setupBP(Configuration conf) - throws IOException { - // get NN proxy - DatanodeProtocol dnp = - (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class, - DatanodeProtocol.versionID, nnAddr, conf); - setNameNode(dnp); - - // handshake with NN - NamespaceInfo nsInfo = handshake(); - setNamespaceInfo(nsInfo); - dn.initBlockPool(this, nsInfo); - - bpRegistration.setStorageID(dn.getStorageId()); - StorageInfo storageInfo = dn.storage.getBPStorage(blockPoolId); - if (storageInfo == null) { - // it's null in the case of SimulatedDataSet - bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION; - bpRegistration.setStorageInfo(nsInfo); - } else { - bpRegistration.setStorageInfo(storageInfo); - } - } - - /** - * This methods arranges for the data node to send the block report at - * the next heartbeat. - */ - void scheduleBlockReport(long delay) { - if (delay > 0) { // send BR after random delay - lastBlockReport = System.currentTimeMillis() - - ( dnConf.blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay))); - } else { // send at next heartbeat - lastBlockReport = lastHeartbeat - dnConf.blockReportInterval; - } - resetBlockReportTime = true; // reset future BRs for randomness - } - - private void reportBadBlocks(ExtendedBlock block) { - DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) }; - LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; - - try { - bpNamenode.reportBadBlocks(blocks); - } catch (IOException e){ - /* One common reason is that NameNode could be in safe mode. - * Should we keep on retrying in that case? - */ - LOG.warn("Failed to report bad block " + block + " to namenode : " - + " Exception", e); - } - - } - - /** - * Report received blocks and delete hints to the Namenode - * - * @throws IOException - */ - private void reportReceivedDeletedBlocks() throws IOException { - - // check if there are newly received blocks - ReceivedDeletedBlockInfo[] receivedAndDeletedBlockArray = null; - int currentReceivedRequestsCounter; - synchronized (receivedAndDeletedBlockList) { - currentReceivedRequestsCounter = pendingReceivedRequests; - int numBlocks = receivedAndDeletedBlockList.size(); - if (numBlocks > 0) { - // - // Send newly-received and deleted blockids to namenode - // - receivedAndDeletedBlockArray = receivedAndDeletedBlockList - .toArray(new ReceivedDeletedBlockInfo[numBlocks]); - } - } - if (receivedAndDeletedBlockArray != null) { - bpNamenode.blockReceivedAndDeleted(bpRegistration, blockPoolId, - receivedAndDeletedBlockArray); - synchronized (receivedAndDeletedBlockList) { - for (int i = 0; i < receivedAndDeletedBlockArray.length; i++) { - receivedAndDeletedBlockList.remove(receivedAndDeletedBlockArray[i]); - } - pendingReceivedRequests -= currentReceivedRequestsCounter; - } - } - } - - /* - * Informing the name node could take a long long time! Should we wait - * till namenode is informed before responding with success to the - * client? For now we don't. - */ - void notifyNamenodeReceivedBlock(ExtendedBlock block, String delHint) { - if (block == null || delHint == null) { - throw new IllegalArgumentException(block == null ? "Block is null" - : "delHint is null"); - } - - if (!block.getBlockPoolId().equals(blockPoolId)) { - LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. 
" - + blockPoolId); - return; - } - - synchronized (receivedAndDeletedBlockList) { - receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block - .getLocalBlock(), delHint)); - pendingReceivedRequests++; - receivedAndDeletedBlockList.notifyAll(); - } - } - - void notifyNamenodeDeletedBlock(ExtendedBlock block) { - if (block == null) { - throw new IllegalArgumentException("Block is null"); - } - - if (!block.getBlockPoolId().equals(blockPoolId)) { - LOG.warn("BlockPool mismatch " + block.getBlockPoolId() + " vs. " - + blockPoolId); - return; - } - - synchronized (receivedAndDeletedBlockList) { - receivedAndDeletedBlockList.add(new ReceivedDeletedBlockInfo(block - .getLocalBlock(), ReceivedDeletedBlockInfo.TODELETE_HINT)); - } - } - - - /** - * Report the list blocks to the Namenode - * @throws IOException - */ - DatanodeCommand blockReport() throws IOException { - // send block report if timer has expired. - DatanodeCommand cmd = null; - long startTime = now(); - if (startTime - lastBlockReport > dnConf.blockReportInterval) { - - // Create block report - long brCreateStartTime = now(); - BlockListAsLongs bReport = dn.data.getBlockReport(blockPoolId); - - // Send block report - long brSendStartTime = now(); - cmd = bpNamenode.blockReport(bpRegistration, blockPoolId, bReport - .getBlockListAsLongs()); - - // Log the block report processing stats from Datanode perspective - long brSendCost = now() - brSendStartTime; - long brCreateCost = brSendStartTime - brCreateStartTime; - dn.metrics.addBlockReport(brSendCost); - LOG.info("BlockReport of " + bReport.getNumberOfBlocks() - + " blocks took " + brCreateCost + " msec to generate and " - + brSendCost + " msecs for RPC and NN processing"); - - // If we have sent the first block report, then wait a random - // time before we start the periodic block reports. - if (resetBlockReportTime) { - lastBlockReport = startTime - DFSUtil.getRandom().nextInt((int)(dnConf.blockReportInterval)); - resetBlockReportTime = false; - } else { - /* say the last block report was at 8:20:14. The current report - * should have started around 9:20:14 (default 1 hour interval). - * If current time is : - * 1) normal like 9:20:18, next report should be at 10:20:14 - * 2) unexpected like 11:35:43, next report should be at 12:20:14 - */ - lastBlockReport += (now() - lastBlockReport) / - dnConf.blockReportInterval * dnConf.blockReportInterval; - } - LOG.info("sent block report, processed command:" + cmd); - } - return cmd; - } - - - DatanodeCommand [] sendHeartBeat() throws IOException { - return bpNamenode.sendHeartbeat(bpRegistration, - dn.data.getCapacity(), - dn.data.getDfsUsed(), - dn.data.getRemaining(), - dn.data.getBlockPoolUsed(blockPoolId), - dn.xmitsInProgress.get(), - dn.getXceiverCount(), dn.data.getNumFailedVolumes()); - } - - //This must be called only by blockPoolManager - void start() { - if ((bpThread != null) && (bpThread.isAlive())) { - //Thread is started already - return; - } - bpThread = new Thread(this, dnThreadName); - bpThread.setDaemon(true); // needed for JUnit testing - bpThread.start(); - } - - //This must be called only by blockPoolManager. - void stop() { - shouldServiceRun = false; - if (bpThread != null) { - bpThread.interrupt(); - } - } - - //This must be called only by blockPoolManager - void join() { - try { - if (bpThread != null) { - bpThread.join(); - } - } catch (InterruptedException ie) { } - } - - //Cleanup method to be called by current thread before exiting. 
- private synchronized void cleanUp() { - - if(upgradeManager != null) - upgradeManager.shutdownUpgrade(); - shouldServiceRun = false; - RPC.stopProxy(bpNamenode); - dn.shutdownBlockPool(this); - } - - /** - * Main loop for each BP thread. Run until shutdown, - * forever calling remote NameNode functions. - */ - private void offerService() throws Exception { - LOG.info("For namenode " + nnAddr + " using DELETEREPORT_INTERVAL of " - + dnConf.deleteReportInterval + " msec " + " BLOCKREPORT_INTERVAL of " - + dnConf.blockReportInterval + "msec" + " Initial delay: " - + dnConf.initialBlockReportDelay + "msec" + "; heartBeatInterval=" - + dnConf.heartBeatInterval); - - // - // Now loop for a long time.... - // - while (dn.shouldRun && shouldServiceRun) { - try { - long startTime = now(); - - // - // Every so often, send heartbeat or block-report - // - if (startTime - lastHeartbeat > dnConf.heartBeatInterval) { - // - // All heartbeat messages include following info: - // -- Datanode name - // -- data transfer port - // -- Total capacity - // -- Bytes remaining - // - lastHeartbeat = startTime; - if (!dn.heartbeatsDisabledForTests) { - DatanodeCommand[] cmds = sendHeartBeat(); - dn.metrics.addHeartbeat(now() - startTime); - - long startProcessCommands = now(); - if (!processCommand(cmds)) - continue; - long endProcessCommands = now(); - if (endProcessCommands - startProcessCommands > 2000) { - LOG.info("Took " + (endProcessCommands - startProcessCommands) + - "ms to process " + cmds.length + " commands from NN"); - } - } - } - if (pendingReceivedRequests > 0 - || (startTime - lastDeletedReport > dnConf.deleteReportInterval)) { - reportReceivedDeletedBlocks(); - lastDeletedReport = startTime; - } - - DatanodeCommand cmd = blockReport(); - processCommand(cmd); - - // Now safe to start scanning the block pool - if (dn.blockScanner != null) { - dn.blockScanner.addBlockPool(this.blockPoolId); - } - - // - // There is no work to do; sleep until hearbeat timer elapses, - // or work arrives, and then iterate again. - // - long waitTime = dnConf.heartBeatInterval - - (System.currentTimeMillis() - lastHeartbeat); - synchronized(receivedAndDeletedBlockList) { - if (waitTime > 0 && pendingReceivedRequests == 0) { - try { - receivedAndDeletedBlockList.wait(waitTime); - } catch (InterruptedException ie) { - LOG.warn("BPOfferService for block pool=" - + this.getBlockPoolId() + " received exception:" + ie); - } - } - } // synchronized - } catch(RemoteException re) { - String reClass = re.getClassName(); - if (UnregisteredNodeException.class.getName().equals(reClass) || - DisallowedDatanodeException.class.getName().equals(reClass) || - IncorrectVersionException.class.getName().equals(reClass)) { - LOG.warn("blockpool " + blockPoolId + " is shutting down", re); - shouldServiceRun = false; - return; - } - LOG.warn("RemoteException in offerService", re); - try { - long sleepTime = Math.min(1000, dnConf.heartBeatInterval); - Thread.sleep(sleepTime); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } - } catch (IOException e) { - LOG.warn("IOException in offerService", e); - } - } // while (shouldRun && shouldServiceRun) - } // offerService - - /** - * Register one bp with the corresponding NameNode - *
- * <p>
- * The bpDatanode needs to register with the namenode on startup in order - * 1) to report which storage it is serving now and - * 2) to receive a registrationID - * - * issued by the namenode to recognize registered datanodes. - * - * @see FSNamesystem#registerDatanode(DatanodeRegistration) - * @throws IOException - */ - void register() throws IOException { - LOG.info("in register: sid=" + bpRegistration.getStorageID() + ";SI=" - + bpRegistration.storageInfo); - - // build and layout versions should match - String nsBuildVer = bpNamenode.versionRequest().getBuildVersion(); - String stBuildVer = Storage.getBuildVersion(); - - if (!nsBuildVer.equals(stBuildVer)) { - LOG.warn("Data-node and name-node Build versions must be " + - "the same. Namenode build version: " + nsBuildVer + "Datanode " + - "build version: " + stBuildVer); - throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer); - } - - if (HdfsConstants.LAYOUT_VERSION != bpNSInfo.getLayoutVersion()) { - LOG.warn("Data-node and name-node layout versions must be " + - "the same. Expected: "+ HdfsConstants.LAYOUT_VERSION + - " actual "+ bpNSInfo.getLayoutVersion()); - throw new IncorrectVersionException - (bpNSInfo.getLayoutVersion(), "namenode"); - } - - while(dn.shouldRun && shouldServiceRun) { - try { - // Use returned registration from namenode with updated machine name. - bpRegistration = bpNamenode.registerDatanode(bpRegistration); - - LOG.info("bpReg after =" + bpRegistration.storageInfo + - ";sid=" + bpRegistration.storageID + ";name="+bpRegistration.getName()); - - break; - } catch(SocketTimeoutException e) { // namenode is busy - LOG.info("Problem connecting to server: " + nnAddr); - try { - Thread.sleep(1000); - } catch (InterruptedException ie) {} - } - } - - dn.bpRegistrationSucceeded(bpRegistration, blockPoolId); - - LOG.info("in register:" + ";bpDNR="+bpRegistration.storageInfo); - - // random short delay - helps scatter the BR from all DNs - scheduleBlockReport(dnConf.initialBlockReportDelay); - } - - - /** - * No matter what kind of exception we get, keep retrying to offerService(). - * That's the loop that connects to the NameNode and provides basic DataNode - * functionality. - * - * Only stop when "shouldRun" or "shouldServiceRun" is turned off, which can - * happen either at shutdown or due to refreshNamenodes. 
- */ - @Override - public void run() { - LOG.info(bpRegistration + "In BPOfferService.run, data = " + dn.data - + ";bp=" + blockPoolId); - - try { - // init stuff - try { - // setup storage - setupBP(dn.conf); - register(); - } catch (IOException ioe) { - // Initial handshake, storage recovery or registration failed - // End BPOfferService thread - LOG.fatal(bpRegistration + " initialization failed for block pool " - + blockPoolId, ioe); - return; - } - - initialized = true; // bp is initialized; - - while (dn.shouldRun && shouldServiceRun) { - try { - startDistributedUpgradeIfNeeded(); - offerService(); - } catch (Exception ex) { - LOG.error("Exception in BPOfferService", ex); - if (dn.shouldRun && shouldServiceRun) { - try { - Thread.sleep(5000); - } catch (InterruptedException ie) { - LOG.warn("Received exception", ie); - } - } - } - } - } catch (Throwable ex) { - LOG.warn("Unexpected exception", ex); - } finally { - LOG.warn(bpRegistration + " ending block pool service for: " - + blockPoolId + " thread " + Thread.currentThread().getId()); - cleanUp(); - } - } - - /** - * Process an array of datanode commands - * - * @param cmds an array of datanode commands - * @return true if further processing may be required or false otherwise. - */ - private boolean processCommand(DatanodeCommand[] cmds) { - if (cmds != null) { - for (DatanodeCommand cmd : cmds) { - try { - if (processCommand(cmd) == false) { - return false; - } - } catch (IOException ioe) { - LOG.warn("Error processing datanode Command", ioe); - } - } - } - return true; - } - - /** - * - * @param cmd - * @return true if further processing may be required or false otherwise. - * @throws IOException - */ - private boolean processCommand(DatanodeCommand cmd) throws IOException { - if (cmd == null) - return true; - final BlockCommand bcmd = - cmd instanceof BlockCommand? (BlockCommand)cmd: null; - - switch(cmd.getAction()) { - case DatanodeProtocol.DNA_TRANSFER: - // Send a copy of a block to another datanode - dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets()); - dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length); - break; - case DatanodeProtocol.DNA_INVALIDATE: - // - // Some local block(s) are obsolete and can be - // safely garbage-collected. 
- // - Block toDelete[] = bcmd.getBlocks(); - try { - if (dn.blockScanner != null) { - dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete); - } - // using global fsdataset - dn.data.invalidate(bcmd.getBlockPoolId(), toDelete); - } catch(IOException e) { - dn.checkDiskError(); - throw e; - } - dn.metrics.incrBlocksRemoved(toDelete.length); - break; - case DatanodeProtocol.DNA_SHUTDOWN: - // shut down the data node - shouldServiceRun = false; - return false; - case DatanodeProtocol.DNA_REGISTER: - // namenode requested a registration - at start or if NN lost contact - LOG.info("DatanodeCommand action: DNA_REGISTER"); - if (dn.shouldRun && shouldServiceRun) { - register(); - } - break; - case DatanodeProtocol.DNA_FINALIZE: - dn.storage.finalizeUpgrade(((FinalizeCommand) cmd) - .getBlockPoolId()); - break; - case UpgradeCommand.UC_ACTION_START_UPGRADE: - // start distributed upgrade here - processDistributedUpgradeCommand((UpgradeCommand)cmd); - break; - case DatanodeProtocol.DNA_RECOVERBLOCK: - dn.recoverBlocks(((BlockRecoveryCommand)cmd).getRecoveringBlocks()); - break; - case DatanodeProtocol.DNA_ACCESSKEYUPDATE: - LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE"); - if (dn.isBlockTokenEnabled) { - dn.blockPoolTokenSecretManager.setKeys(blockPoolId, - ((KeyUpdateCommand) cmd).getExportedKeys()); - } - break; - case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE: - LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE"); - long bandwidth = - ((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue(); - if (bandwidth > 0) { - DataXceiverServer dxcs = - (DataXceiverServer) dn.dataXceiverServer.getRunnable(); - dxcs.balanceThrottler.setBandwidth(bandwidth); - } - break; - default: - LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction()); - } - return true; - } - - private void processDistributedUpgradeCommand(UpgradeCommand comm) - throws IOException { - UpgradeManagerDatanode upgradeManager = getUpgradeManager(); - upgradeManager.processUpgradeCommand(comm); - } - - synchronized UpgradeManagerDatanode getUpgradeManager() { - if(upgradeManager == null) - upgradeManager = - new UpgradeManagerDatanode(dn, blockPoolId); - - return upgradeManager; - } - - /** - * Start distributed upgrade if it should be initiated by the data-node. - */ - private void startDistributedUpgradeIfNeeded() throws IOException { - UpgradeManagerDatanode um = getUpgradeManager(); - - if(!um.getUpgradeState()) - return; - um.setUpgradeState(false, um.getUpgradeVersion()); - um.startUpgrade(); - return; - } + boolean areHeartbeatsDisabledForTests() { + return this.heartbeatsDisabledForTests; } - + /** * This method starts the data node with the specified conf. * @@ -1406,13 +728,33 @@ void startDataNode(Configuration conf, blockPoolManager = new BlockPoolManager(conf); } + /** + * Create a DatanodeRegistration for a specific block pool. 
+ * @param nsInfo the namespace info from the first part of the NN handshake + */ + DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) { + DatanodeRegistration bpRegistration = createUnknownBPRegistration(); + String blockPoolId = nsInfo.getBlockPoolID(); + + bpRegistration.setStorageID(getStorageId()); + StorageInfo storageInfo = storage.getBPStorage(blockPoolId); + if (storageInfo == null) { + // it's null in the case of SimulatedDataSet + bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION; + bpRegistration.setStorageInfo(nsInfo); + } else { + bpRegistration.setStorageInfo(storageInfo); + } + return bpRegistration; + } + /** * Check that the registration returned from a NameNode is consistent * with the information in the storage. If the storage is fresh/unformatted, * sets the storage ID based on this registration. * Also updates the block pool's state in the secret manager. */ - private synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration, + synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration, String blockPoolId) throws IOException { hostName = bpRegistration.getHost(); @@ -1469,7 +811,7 @@ private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistrat /** * Remove the given block pool from the block scanner, dataset, and storage. */ - private void shutdownBlockPool(BPOfferService bpos) { + void shutdownBlockPool(BPOfferService bpos) { blockPoolManager.remove(bpos); String bpId = bpos.getBlockPoolId(); @@ -1486,11 +828,27 @@ private void shutdownBlockPool(BPOfferService bpos) { } } - void initBlockPool(BPOfferService bpOfferService, - NamespaceInfo nsInfo) throws IOException { + /** + * One of the Block Pools has successfully connected to its NN. + * This initializes the local storage for that block pool, + * checks consistency of the NN's cluster ID, etc. + * + * If this is the first block pool to register, this also initializes + * the datanode-scoped storage. + * + * @param nsInfo the handshake response from the NN. + * @throws IOException if the NN is inconsistent with the local storage. + */ + void initBlockPool(BPOfferService bpos) throws IOException { + NamespaceInfo nsInfo = bpos.getNamespaceInfo(); + Preconditions.checkState(nsInfo != null, + "Block pool " + bpos + " should have retrieved " + + "its namespace info before calling initBlockPool."); + String blockPoolId = nsInfo.getBlockPoolID(); - blockPoolManager.addBlockPool(bpOfferService); + // Register the new block pool with the BP manager. + blockPoolManager.addBlockPool(bpos); synchronized (this) { // we do not allow namenode from different cluster to register @@ -1521,12 +879,21 @@ void initBlockPool(BPOfferService bpOfferService, + blockPoolId + ";lv=" + storage.getLayoutVersion() + ";nsInfo=" + nsInfo); } + + // In the case that this is the first block pool to connect, initialize + // the dataset, block scanners, etc. initFsDataSet(); - initPeriodicScanners(conf); - data.addBlockPool(nsInfo.getBlockPoolID(), conf); + initPeriodicScanners(conf); + + data.addBlockPool(blockPoolId, conf); } - private DatanodeRegistration createRegistration() { + /** + * Create a DatanodeRegistration object with no valid StorageInfo. + * This is used when reporting an error during handshake - ie + * before we can load any specific block pool. 
+ */ + private DatanodeRegistration createUnknownBPRegistration() { DatanodeRegistration reg = new DatanodeRegistration(getMachineName()); reg.setInfoPort(infoServer.getPort()); reg.setIpcPort(getIpcPort()); @@ -1717,6 +1084,68 @@ static String createNewStorageId(int port) { return "DS-" + rand + "-" + ip + "-" + port + "-" + System.currentTimeMillis(); } + + /** Ensure the authentication method is kerberos */ + private void checkKerberosAuthMethod(String msg) throws IOException { + // User invoking the call must be same as the datanode user + if (!UserGroupInformation.isSecurityEnabled()) { + return; + } + if (UserGroupInformation.getCurrentUser().getAuthenticationMethod() != + AuthenticationMethod.KERBEROS) { + throw new AccessControlException("Error in " + msg + + "Only kerberos based authentication is allowed."); + } + } + + private void checkBlockLocalPathAccess() throws IOException { + checkKerberosAuthMethod("getBlockLocalPathInfo()"); + String currentUser = UserGroupInformation.getCurrentUser().getShortUserName(); + if (!currentUser.equals(this.userWithLocalPathAccess)) { + throw new AccessControlException( + "Can't continue with getBlockLocalPathInfo() " + + "authorization. The user " + currentUser + + " is not allowed to call getBlockLocalPathInfo"); + } + } + + @Override + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, + Token token) throws IOException { + checkBlockLocalPathAccess(); + checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ); + BlockLocalPathInfo info = data.getBlockLocalPathInfo(block); + if (LOG.isDebugEnabled()) { + if (info != null) { + if (LOG.isTraceEnabled()) { + LOG.trace("getBlockLocalPathInfo successful block=" + block + + " blockfile " + info.getBlockPath() + " metafile " + + info.getMetaPath()); + } + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("getBlockLocalPathInfo for block=" + block + + " returning null"); + } + } + } + metrics.incrBlocksGetLocalPathInfo(); + return info; + } + + private void checkBlockToken(ExtendedBlock block, Token token, + AccessMode accessMode) throws IOException { + if (isBlockTokenEnabled && UserGroupInformation.isSecurityEnabled()) { + BlockTokenIdentifier id = new BlockTokenIdentifier(); + ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); + DataInputStream in = new DataInputStream(buf); + id.readFields(in); + if (LOG.isDebugEnabled()) { + LOG.debug("Got: " + id.toString()); + } + blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode); + } + } /** * Shut down this instance of the datanode. @@ -1917,7 +1346,7 @@ private void transferBlock( ExtendedBlock block, } } - private void transferBlocks(String poolId, Block blocks[], + void transferBlocks(String poolId, Block blocks[], DatanodeInfo xferTargets[][]) { for (int i = 0; i < blocks.length; i++) { try { @@ -2034,7 +1463,7 @@ private class DataTransfer implements Runnable { * entire target list, the block, and the data. 
*/ DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage, - final String clientname) throws IOException { + final String clientname) { if (DataTransferProtocol.LOG.isDebugEnabled()) { DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": " + b + " (numBytes=" + b.getNumBytes() + ")" @@ -2209,9 +1638,7 @@ public static DataNode instantiateDataNode(String args [], Configuration conf, System.exit(-1); } Collection dataDirs = getStorageDirs(conf); - dnThreadName = "DataNode: [" + - StringUtils.uriToString(dataDirs.toArray(new URI[0])) + "]"; - UserGroupInformation.setConfiguration(conf); + UserGroupInformation.setConfiguration(conf); SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY, DFS_DATANODE_USER_NAME_KEY); return makeInstance(dataDirs, conf, resources); @@ -2554,16 +1981,6 @@ public DatanodeProtocol getBPNamenode(String bpid) throws IOException { return bpos.bpNamenode; } - /** - * To be used by tests only to set a mock namenode in BPOfferService - */ - void setBPNamenode(String bpid, DatanodeProtocol namenode) { - BPOfferService bp = blockPoolManager.get(bpid); - if (bp != null) { - bp.setNameNode(namenode); - } - } - /** Block synchronization */ void syncBlock(RecoveringBlock rBlock, List syncList) throws IOException { @@ -2753,6 +2170,14 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b, } } + /** + * Finalize a pending upgrade in response to DNA_FINALIZE. + * @param blockPoolId the block pool to finalize + */ + void finalizeUpgradeForPool(String blockPoolId) throws IOException { + storage.finalizeUpgrade(blockPoolId); + } + // Determine a Datanode's streaming address public static InetSocketAddress getStreamingAddr(Configuration conf) { return NetUtils.createSocketAddr( @@ -2789,7 +2214,7 @@ public String getNamenodeAddresses() { final Map info = new HashMap(); for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) { if (bpos != null && bpos.bpThread != null) { - info.put(bpos.getNNSocketAddress().getHostName(), bpos.blockPoolId); + info.put(bpos.getNNSocketAddress().getHostName(), bpos.getBlockPoolId()); } } return JSON.toString(info); @@ -2820,13 +2245,7 @@ public synchronized String getClusterId() { } public void refreshNamenodes(Configuration conf) throws IOException { - try { - blockPoolManager.refreshNamenodes(conf); - } catch (InterruptedException ex) { - IOException eio = new IOException(); - eio.initCause(ex); - throw eio; - } + blockPoolManager.refreshNamenodes(conf); } @Override //ClientDatanodeProtocol @@ -2877,7 +2296,7 @@ public boolean isBPServiceAlive(String bpid) { */ public boolean isDatanodeFullyStarted() { for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) { - if (!bp.initialized() || !bp.isAlive()) { + if (!bp.isInitialized() || !bp.isAlive()) { return false; } } @@ -2904,4 +2323,9 @@ public Long getBalancerBandwidth() { DNConf getDnConf() { return dnConf; } + + boolean shouldRun() { + return shouldRun; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 11282a5b7a..fe07754a3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -48,8 +48,6 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver; import 
org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder; -import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; @@ -128,7 +126,7 @@ private void updateCurrentThreadName(String status) { public void run() { int opsProcessed = 0; Op op = null; - dataXceiverServer.childSockets.put(s, s); + dataXceiverServer.childSockets.add(s); try { int stdTimeout = s.getSoTimeout(); @@ -165,14 +163,6 @@ public void run() { s.setSoTimeout(stdTimeout); } - // Make sure the xceiver count is not exceeded - int curXceiverCount = datanode.getXceiverCount(); - if (curXceiverCount > dataXceiverServer.maxXceiverCount) { - throw new IOException("xceiverCount " + curXceiverCount - + " exceeds the limit of concurrent xcievers " - + dataXceiverServer.maxXceiverCount); - } - opStartTime = now(); processOp(op); ++opsProcessed; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index c0d782a5c7..eed58ecad4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -23,9 +23,9 @@ import java.net.SocketTimeoutException; import java.nio.channels.AsynchronousCloseException; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; -import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; @@ -48,8 +48,8 @@ class DataXceiverServer implements Runnable { ServerSocket ss; DataNode datanode; // Record all sockets opened for data transfer - Map childSockets = Collections.synchronizedMap( - new HashMap()); + Set childSockets = Collections.synchronizedSet( + new HashSet()); /** * Maximal number of concurrent xceivers per node. 
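The DataXceiver/DataXceiverServer hunks here replace the childSockets map with a synchronized Set and move the concurrent-xceiver limit check out of the per-connection thread and into the accept loop (next hunk), so an over-limit connection is rejected before a worker daemon is spawned. A rough, self-contained sketch of that shape follows; it uses plain sockets and a made-up MAX_XCEIVERS constant, and approximates the xceiver count with the set size, so it is illustrative rather than the Hadoop implementation.

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

// Sketch only: a bounded accept loop that tracks child sockets in a
// synchronized Set, roughly mirroring the DataXceiverServer change.
public class BoundedAcceptLoop implements Runnable {
  private static final int MAX_XCEIVERS = 4096;   // illustrative limit

  private final ServerSocket serverSocket;
  private final Set<Socket> childSockets =
      Collections.synchronizedSet(new HashSet<Socket>());

  public BoundedAcceptLoop(int port) throws IOException {
    this.serverSocket = new ServerSocket(port);
  }

  @Override
  public void run() {
    while (!serverSocket.isClosed()) {
      try {
        final Socket s = serverSocket.accept();
        s.setTcpNoDelay(true);
        // Enforce the cap before starting a worker, not inside it.
        if (childSockets.size() >= MAX_XCEIVERS) {
          s.close();
          throw new IOException("Xceiver count " + childSockets.size()
              + " exceeds the limit of " + MAX_XCEIVERS);
        }
        childSockets.add(s);
        new Thread(new Runnable() {
          public void run() { handle(s); }
        }).start();
      } catch (IOException e) {
        // log and keep accepting new connections
      }
    }
  }

  private void handle(Socket s) {
    try {
      // ... serve the connection ...
    } finally {
      childSockets.remove(s);
      try { s.close(); } catch (IOException ignored) { }
    }
  }
}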
@@ -135,6 +135,15 @@ public void run() { try { s = ss.accept(); s.setTcpNoDelay(true); + + // Make sure the xceiver count is not exceeded + int curXceiverCount = datanode.getXceiverCount(); + if (curXceiverCount > maxXceiverCount) { + throw new IOException("Xceiver count " + curXceiverCount + + " exceeds the limit of concurrent xcievers: " + + maxXceiverCount); + } + new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this)) .start(); } catch (SocketTimeoutException ignored) { @@ -184,7 +193,7 @@ void kill() { // close all the sockets that were accepted earlier synchronized (childSockets) { - for (Iterator it = childSockets.values().iterator(); + for (Iterator it = childSockets.iterator(); it.hasNext();) { Socket thissock = it.next(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java index e309dc1f47..f885c8b21c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; @@ -459,7 +460,7 @@ private long validateIntegrity(File blockFile, long genStamp) { long metaFileLen = metaFile.length(); int crcHeaderLen = DataChecksum.getChecksumHeaderSize(); if (!blockFile.exists() || blockFileLen == 0 || - !metaFile.exists() || metaFileLen < (long)crcHeaderLen) { + !metaFile.exists() || metaFileLen < crcHeaderLen) { return 0; } checksumIn = new DataInputStream( @@ -578,7 +579,7 @@ long getBlockPoolUsed(String bpid) throws IOException { * reserved capacity. * @return the unreserved number of bytes left in this filesystem. May be zero. */ - long getCapacity() throws IOException { + long getCapacity() { long remaining = usage.getCapacity() - reserved; return remaining > 0 ? remaining : 0; } @@ -818,7 +819,7 @@ private long getBlockPoolUsed(String bpid) throws IOException { return dfsUsed; } - private long getCapacity() throws IOException { + private long getCapacity() { long capacity = 0L; for (FSVolume vol : volumes) { capacity += vol.getCapacity(); @@ -1667,7 +1668,7 @@ private void bumpReplicaGS(ReplicaInfo replicaInfo, } if (!oldmeta.renameTo(newmeta)) { replicaInfo.setGenerationStamp(oldGS); // restore old GS - throw new IOException("Block " + (Block)replicaInfo + " reopen failed. " + + throw new IOException("Block " + replicaInfo + " reopen failed. " + " Unable to move meta file " + oldmeta + " to " + newmeta); } @@ -2018,7 +2019,7 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) { /** * Find the file corresponding to the block and return it if it exists. */ - File validateBlockFile(String bpid, Block b) throws IOException { + File validateBlockFile(String bpid, Block b) { //Should we check for metadata file too? 
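The kill() hunk above still wraps its loop in synchronized (childSockets): a Collections.synchronizedSet view only synchronizes individual operations, so iteration has to be guarded manually. A small standalone illustration of that idiom (names are invented, not Hadoop's):

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

// Illustrative only: closing every tracked resource held in a synchronized Set.
// Iterating a Collections.synchronizedSet view must be manually synchronized
// on the set itself, which is exactly what the kill() code above does.
class ResourceTracker {
  private final Set<Closeable> open =
      Collections.synchronizedSet(new HashSet<Closeable>());

  void register(Closeable c)   { open.add(c); }
  void unregister(Closeable c) { open.remove(c); }

  void closeAll() {
    synchronized (open) {                 // required for safe iteration
      for (Closeable c : open) {
        try {
          c.close();
        } catch (IOException e) {
          // best effort: keep closing the remaining resources
        }
      }
      open.clear();
    }
  }
}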
File f = getFile(bpid, b); @@ -2327,7 +2328,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile, if (datanode.blockScanner != null) { datanode.blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo)); } - DataNode.LOG.warn("Added missing block to memory " + (Block)diskBlockInfo); + DataNode.LOG.warn("Added missing block to memory " + diskBlockInfo); return; } /* @@ -2600,7 +2601,7 @@ public synchronized void shutdownBlockPool(String bpid) { * get list of all bpids * @return list of bpids */ - public String [] getBPIdlist() throws IOException { + public String [] getBPIdlist() { return volumeMap.getBlockPoolList(); } @@ -2658,4 +2659,14 @@ public synchronized void deleteBlockPool(String bpid, boolean force) volume.deleteBPDirectories(bpid, force); } } + + @Override // FSDatasetInterface + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) + throws IOException { + File datafile = getBlockFile(block); + File metafile = getMetaFile(datafile, block.getGenerationStamp()); + BlockLocalPathInfo info = new BlockLocalPathInfo(block, + datafile.getAbsolutePath(), metafile.getAbsolutePath()); + return info; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java index 38017cfdb8..2f05f16c2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java @@ -19,6 +19,7 @@ import java.io.Closeable; +import java.io.File; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; @@ -31,6 +32,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DataChecksum; @@ -402,4 +404,9 @@ public ReplicaInfo updateReplicaUnderRecovery( * @throws IOException */ public void deleteBlockPool(String bpid, boolean force) throws IOException; + + /** + * Get {@link BlockLocalPathInfo} for the given block. 
+ **/ + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java index 14c1258fe4..c20b0090d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import java.io.DataInputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index 4df11d3434..9e18007810 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -60,6 +60,7 @@ public class DataNodeMetrics { @Metric MutableCounterLong readsFromRemoteClient; @Metric MutableCounterLong writesFromLocalClient; @Metric MutableCounterLong writesFromRemoteClient; + @Metric MutableCounterLong blocksGetLocalPathInfo; @Metric MutableCounterLong volumeFailures; @@ -165,4 +166,9 @@ public void incrReadsFromClient(boolean local) { public void incrVolumeFailures() { volumeFailures.incr(); } + + /** Increment for getBlockLocalPathInfo calls */ + public void incrBlocksGetLocalPathInfo() { + blocksGetLocalPathInfo.incr(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index dd68261253..fc1fe14af7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -17,14 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.BufferedInputStream; -import java.io.DataInputStream; import java.io.IOException; import java.util.Collection; import java.util.Iterator; import java.util.List; -import java.util.zip.Checksum; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java index 2bd585e236..c655ee75bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java @@ -19,7 +19,6 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; /** diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index 84408c0162..39d2abaee7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.Daemon; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java index 1c8253f665..6459ffd0e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java @@ -23,8 +23,6 @@ import javax.servlet.ServletContext; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 9db7f8ae66..d05c4fe3d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -26,10 +26,7 @@ import java.io.DataInputStream; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; - import com.google.common.annotations.VisibleForTesting; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index f80f863346..cb0f88e85a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; -import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 991fd08c84..30b0b8c151 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.hdfs.server.common.Util.now; -import java.io.File; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; @@ -34,7 +33,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.LogHeaderCorruptException; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp; @@ -57,8 +55,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.util.Holder; -import org.apache.hadoop.io.IOUtils; - import com.google.common.base.Joiner; public class FSEditLogLoader { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java index a7c2949f29..a3a516f0ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java @@ -19,8 +19,6 @@ import java.io.File; import java.io.IOException; -import java.util.List; - import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java index 33d6e90f92..dbf1860a85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java @@ -23,28 +23,16 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import 
org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; -import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; -import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; - -import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; class FSImageTransactionalStorageInspector extends FSImageStorageInspector { public static final Log LOG = LogFactory.getLog( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 70680e3649..fc56db4752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -305,7 +305,20 @@ private static final void logAuditEvent(UserGroupInformation ugi, * @throws IOException if loading fails */ public static FSNamesystem loadFromDisk(Configuration conf) throws IOException { - FSImage fsImage = new FSImage(conf); + Collection namespaceDirs = FSNamesystem.getNamespaceDirs(conf); + Collection namespaceEditsDirs = + FSNamesystem.getNamespaceEditsDirs(conf); + + if (namespaceDirs.size() == 1) { + LOG.warn("Only one " + DFS_NAMENODE_NAME_DIR_KEY + + " directory configured , beware data loss!"); + } + if (namespaceEditsDirs.size() == 1) { + LOG.warn("Only one " + DFS_NAMENODE_EDITS_DIR_KEY + + " directory configured , beware data loss!"); + } + + FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs); FSNamesystem namesystem = new FSNamesystem(conf, fsImage); long loadStart = now(); @@ -2060,10 +2073,12 @@ void removePathAndBlocks(String src, List blocks) { } } - /** Get the file info for a specific file. + /** + * Get the file info for a specific file. + * * @param src The string representation of the path to the file * @param resolveLink whether to throw UnresolvedLinkException - * if src refers to a symlinks + * if src refers to a symlink * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if a symlink is encountered. @@ -2271,6 +2286,7 @@ boolean internalReleaseLease(Lease lease, String src, // If the penultimate block is not COMPLETE, then it must be COMMITTED. if(nrCompleteBlocks < nrBlocks - 2 || nrCompleteBlocks == nrBlocks - 2 && + curBlock != null && curBlock.getBlockUCState() != BlockUCState.COMMITTED) { final String message = "DIR* NameSystem.internalReleaseLease: " + "attempt to release a create lock on " @@ -2459,7 +2475,7 @@ void commitBlockSynchronization(ExtendedBlock lastblock, newtargets[i]); } } - if (closeFile) { + if ((closeFile) && (descriptors != null)) { // the file is getting closed. Insert block locations into blockManager. // Otherwise fsck will report these blocks as MISSING, especially if the // blocksReceived from Datanodes take a long time to arrive. @@ -3283,6 +3299,7 @@ public String toString() { /** * Checks consistency of the class state. * This is costly and currently called only in assert. 
+ * @throws IOException */ private boolean isConsistent() { if (blockTotal == -1 && blockSafe == -1) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index d8bd502597..023e3b612e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -19,8 +19,6 @@ import java.io.IOException; import java.io.PrintWriter; -import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; import javax.net.SocketFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java index 3e630661e9..543033b341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; -import java.net.URISyntaxException; import java.net.URL; import java.security.PrivilegedExceptionAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 83d9858586..9885f23f92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -372,14 +372,16 @@ static String[] getPathNames(String path) { /** * Given some components, create a path name. 
- * @param components + * @param components The path components + * @param start index + * @param end index * @return concatenated path */ - static String constructPath(byte[][] components, int start) { + static String constructPath(byte[][] components, int start, int end) { StringBuilder buf = new StringBuilder(); - for (int i = start; i < components.length; i++) { + for (int i = start; i < end; i++) { buf.append(DFSUtil.bytes2String(components[i])); - if (i < components.length - 1) { + if (i < end - 1) { buf.append(Path.SEPARATOR); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 7f0c997ee9..f4d9e78f88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -23,8 +23,6 @@ import java.util.List; import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; @@ -191,18 +189,19 @@ assert compareBytes(this.name, components[0]) == 0 : existing[index] = curNode; } if (curNode.isLink() && (!lastComp || (lastComp && resolveLink))) { - if(NameNode.stateChangeLog.isDebugEnabled()) { + final String path = constructPath(components, 0, components.length); + final String preceding = constructPath(components, 0, count); + final String remainder = + constructPath(components, count + 1, components.length); + final String link = DFSUtil.bytes2String(components[count]); + final String target = ((INodeSymlink)curNode).getLinkValue(); + if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("UnresolvedPathException " + - " count: " + count + - " componenent: " + DFSUtil.bytes2String(components[count]) + - " full path: " + constructPath(components, 0) + - " remaining path: " + constructPath(components, count+1) + - " symlink: " + ((INodeSymlink)curNode).getLinkValue()); + " path: " + path + " preceding: " + preceding + + " count: " + count + " link: " + link + " target: " + target + + " remainder: " + remainder); } - final String linkTarget = ((INodeSymlink)curNode).getLinkValue(); - throw new UnresolvedPathException(constructPath(components, 0), - constructPath(components, count+1), - linkTarget); + throw new UnresolvedPathException(path, preceding, remainder, target); } if (lastComp || !curNode.isDirectory()) { break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 4b59e50924..6f476ec0f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; +import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT; + import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; @@ -145,7 +149,7 @@ private Map getAuthFilterParams(Configuration conf) } }; - boolean certSSL = conf.getBoolean("dfs.https.enable", false); + boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); boolean useKrb = UserGroupInformation.isSecurityEnabled(); if (certSSL || useKrb) { boolean needClientAuth = conf.getBoolean( @@ -156,14 +160,14 @@ private Map getAuthFilterParams(Configuration conf) DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT)); Configuration sslConf = new HdfsConfiguration(false); if (certSSL) { - sslConf.addResource(conf.get( - "dfs.https.server.keystore.resource", "ssl-server.xml")); + sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, + DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT)); } httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, useKrb); // assume same ssl port for all datanodes InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf - .get("dfs.datanode.https.address", infoHost + ":" + 50475)); + .get(DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475)); httpServer.setAttribute("datanode.https.port", datanodeSslPort .getPort()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 2a2318cb6e..ff05744343 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -819,10 +819,6 @@ public void createSymlink(String target, String link, FsPermission dirPerms, public String getLinkTarget(String path) throws IOException { nn.checkOperation(OperationCategory.READ); metrics.incrGetLinkTargetOps(); - /* Resolves the first symlink in the given path, returning a - * new path consisting of the target of the symlink and any - * remaining path components from the original path. 
- */ try { HdfsFileStatus stat = namesystem.getFileInfo(path, false); if (stat != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 74f8b2d5b9..96cc8982df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -29,7 +29,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.TreeSet; import org.apache.commons.logging.Log; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 3d2fd8b0be..496423d4a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -380,7 +380,13 @@ static void redirectToRandomDataNode(ServletContext context, final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); final Configuration conf = (Configuration) context .getAttribute(JspHelper.CURRENT_CONF); - final DatanodeID datanode = getRandomDatanode(nn); + // We can't redirect if there isn't a DN to redirect to. + // Lets instead show a proper error message. + if (nn.getNamesystem().getNumLiveDataNodes() < 1) { + throw new IOException("Can't browse the DFS since there are no " + + "live nodes available to redirect to."); + } + final DatanodeID datanode = getRandomDatanode(nn);; UserGroupInformation ugi = JspHelper.getUGI(context, request, conf); String tokenString = getDelegationToken( nn.getRpcServer(), request, conf, ugi); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java index ddd0acbbfb..d6a42dde55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java @@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java index c82494d5ba..b6f58877f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java @@ -20,8 +20,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.Comparator; - import 
org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.Writable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java index 990b235c9f..5f22888678 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java @@ -20,17 +20,12 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.List; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocolR23Compatible.BlockWritable; import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeInfoWritable; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index b2a84a956a..bc98995af3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index eb8af25d26..2a685f73bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -40,7 +40,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HftpFileSystem; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoader.java index a485871c54..dfa5b1408f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoader.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoader.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.IOException; -import java.util.Map; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitor.java index 6dbca3c4a6..c8a8912741 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitor.java @@ -22,8 +22,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.DeprecatedUTF8; - import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ByteToken; import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ShortToken; import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.IntToken; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitorFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitorFactory.java index b4c9b561e4..43af249a46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitorFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsVisitorFactory.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.IOException; -import java.util.regex.Pattern; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TokenizerFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TokenizerFactory.java index 8ce57c6802..b9333a841f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TokenizerFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TokenizerFactory.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.IOException; -import java.util.regex.Pattern; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java index 69b238bbbf..6972a86037 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java @@ -19,11 +19,6 @@ import java.lang.ref.WeakReference; import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; import 
java.util.Queue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java index 866734f17b..13ca8b45c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java @@ -22,7 +22,6 @@ import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; @@ -615,4 +614,4 @@ public boolean removeAll(Collection c) { public boolean retainAll(Collection c) { throw new UnsupportedOperationException("retainAll is not supported."); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 59a8ff645f..c99763e395 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -122,11 +122,17 @@ creations/deletions), or "all". dfs.datanode.https.address 0.0.0.0:50475 + The datanode secure http server address and port. + If the port is 0 then the server will start on a free port. + dfs.namenode.https-address 0.0.0.0:50470 + The namenode secure http server address and port. + If the port is 0 then the server will start on a free port. + @@ -193,6 +199,14 @@ creations/deletions), or "all". directories, for redundancy. + + dfs.namenode.name.dir.restore + false + Set to true to enable NameNode to attempt recovering a + previously failed dfs.name.dir. When enabled, a recovery of any failed + directory is attempted during checkpoint. + + dfs.namenode.fs-limits.max-component-length 0 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto new file mode 100644 index 0000000000..9f36e29e0f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/InterDatanodeProtocol.proto @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used throughout HDFS -- i.e. +// by the client, server, and data transfer protocols. 
+ +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "InterDatanodeProtocolProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; + +import "hdfs.proto"; + +/** + * Block with location information and new generation stamp + * to be used for recovery. + */ +message InitReplicaRecoveryRequestProto { + required RecoveringBlockProto block = 1; +} + +/** + * Repica recovery information + */ +message InitReplicaRecoveryResponseProto { + required ReplicaState state = 1; // State fo the replica + required BlockProto block = 2; // block information +} + +/** + * Update replica with new generation stamp and length + */ +message UpdateReplicaUnderRecoveryRequestProto { + required ExtendedBlockProto block = 1; // Block identifier + required uint64 recoveryId = 2; // New genstamp of the replica + required uint64 newLength = 3; // New length of the replica +} + +/** + * Response returns updated block information + */ +message UpdateReplicaUnderRecoveryResponseProto { + required ExtendedBlockProto block = 1; // Updated block information +} + +/** + * Protocol used between datanodes for block recovery. + * + * See the request and response for details of rpc call. + */ +service InterDatanodeProtocolService { + /** + * Initialize recovery of a replica + */ + rpc initReplicaRecovery(InitReplicaRecoveryRequestProto) + returns(InitReplicaRecoveryResponseProto); + + /** + * Update a replica with new generation stamp and length + */ + rpc updateReplicaUnderRecovery(UpdateReplicaUnderRecoveryRequestProto) + returns(UpdateReplicaUnderRecoveryResponseProto); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/JournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/JournalProtocol.proto new file mode 100644 index 0000000000..fa7ed15c64 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/JournalProtocol.proto @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used throughout HDFS -- i.e. +// by the client, server, and data transfer protocols. 
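Because java_generic_services is enabled, protoc generates message classes and a service stub for the InterDatanodeProtocolService defined above. The following is a hedged sketch of building the two request messages on the caller's side; it assumes the generated InterDatanodeProtocolProtos classes from this .proto and the HdfsProtos classes generated from hdfs.proto, and omits the RPC-engine wiring entirely.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;

// Sketch: constructing the recovery request messages defined above.
// Assumes the generated classes from InterDatanodeProtocol.proto and
// hdfs.proto; the block protos are placeholders supplied by the caller.
public class ReplicaRecoveryRequests {

  static InitReplicaRecoveryRequestProto initRequest(RecoveringBlockProto recoveringBlock) {
    return InitReplicaRecoveryRequestProto.newBuilder()
        .setBlock(recoveringBlock)        // block plus new generation stamp
        .build();
  }

  static UpdateReplicaUnderRecoveryRequestProto updateRequest(
      ExtendedBlockProto block, long recoveryId, long newLength) {
    return UpdateReplicaUnderRecoveryRequestProto.newBuilder()
        .setBlock(block)
        .setRecoveryId(recoveryId)        // new generation stamp for the replica
        .setNewLength(newLength)          // updated replica length
        .build();
  }
}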
+ +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "JournalProtocolProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; + +import "hdfs.proto"; + +/** + * registration - the registration info of the active NameNode + * firstTxnId - the first txid in the rolled edit log + * numTxns - Number of transactions in editlog + * records - bytes containing serialized journal records + */ +message JournalRequestProto { + required NamenodeRegistrationProto registration = 1; // Registration info + required uint64 firstTxnId = 2; // Transaction ID + required uint32 numTxns = 3; // Transaction ID + required bytes records = 4; // Journal record +} + +/** + * void response + */ +message JournalResponseProto { +} + +/** + * registration - the registration info of the active NameNode + * txid - first txid in the new log + */ +message StartLogSegmentRequestProto { + required NamenodeRegistrationProto registration = 1; // Registration info + required uint64 txid = 2; // Transaction ID +} + +/** + * void response + */ +message StartLogSegmentResponseProto { +} + +/** + * Protocol used to journal edits to a remote node. Currently, + * this is used to publish edits from the NameNode to a BackupNode. + * + * See the request and response for details of rpc call. + */ +service JournalProtocolService { + /** + * Request sent by active namenode to backup node via + * EditLogBackupOutputStream to stream editlog records. + */ + rpc journal(JournalRequestProto) returns (JournalResponseProto); + + /** + * Request sent by active namenode to backup node to notify + * that the NameNode has rolled its edit logs and is now writing a + * new log segment. + */ + rpc startLogSegment(StartLogSegmentRequestProto) + returns (StartLogSegmentResponseProto); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto new file mode 100644 index 0000000000..27fa6d1995 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/NamenodeProtocol.proto @@ -0,0 +1,225 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used throughout HDFS -- i.e. +// by the client, server, and data transfer protocols. 
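As the comments above describe, journal() carries a batch of serialized edit records from the active NameNode to a BackupNode, and startLogSegment() announces a new log segment. A hedged sketch of assembling those requests from the generated classes follows; the registration proto and the serialized-edits byte[] are placeholders obtained elsewhere, and NamenodeRegistrationProto is assumed to come from hdfs.proto's generated HdfsProtos.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;

// Sketch: building the journal/startLogSegment requests defined above.
// Assumes the generated JournalProtocolProtos classes; no RPC wiring shown.
public class JournalRequests {

  static JournalRequestProto journalBatch(NamenodeRegistrationProto reg,
      long firstTxnId, int numTxns, byte[] serializedEdits) {
    return JournalRequestProto.newBuilder()
        .setRegistration(reg)                       // who is sending the edits
        .setFirstTxnId(firstTxnId)                  // first txid in this batch
        .setNumTxns(numTxns)                        // number of transactions in the batch
        .setRecords(ByteString.copyFrom(serializedEdits))
        .build();
  }

  static StartLogSegmentRequestProto startSegment(NamenodeRegistrationProto reg,
      long firstTxIdOfNewLog) {
    return StartLogSegmentRequestProto.newBuilder()
        .setRegistration(reg)
        .setTxid(firstTxIdOfNewLog)                 // first txid of the new segment
        .build();
  }
}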
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "NamenodeProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+import "hdfs.proto";
+
+/**
+ * Get a list of blocks for a given datanode whose total length
+ * adds up to the given size
+ * datanode - Datanode ID to get the list of blocks from
+ * size - size in bytes that the block lengths must add up to
+ */
+message GetBlocksRequestProto {
+ required DatanodeIDProto datanode = 1; // Datanode ID
+ required uint64 size = 2; // Size in bytes
+}
+
+
+/**
+ * blocks - List of returned blocks
+ */
+message GetBlocksResponseProto {
+ required BlockWithLocationsProto blocks = 1; // List of blocks
+}
+
+/**
+ * void request
+ */
+message GetBlockKeysRequestProto {
+}
+
+/**
+ * keys - Information about block keys at the active namenode
+ */
+message GetBlockKeysResponseProto {
+ required ExportedBlockKeysProto keys = 1;
+}
+
+/**
+ * void request
+ */
+message GetTransactionIdRequestProto {
+}
+
+/**
+ * txId - Transaction ID of the most recently persisted edit log record
+ */
+message GetTransactionIdResponseProto {
+ required uint64 txId = 1; // Transaction ID
+}
+
+/**
+ * void request
+ */
+message RollEditLogRequestProto {
+}
+
+/**
+ * signature - A unique token to identify checkpoint transaction
+ */
+message RollEditLogResponseProto {
+ required CheckpointSignatureProto signature = 1;
+}
+
+/**
+ * registration - Namenode reporting the error
+ * errorCode - error code indicating the error
+ * msg - Free text description of the error
+ */
+message ErrorReportRequestProto {
+ required NamenodeRegistrationProto registartion = 1; // Registration info
+ required uint32 errorCode = 2; // Error code
+ required string msg = 3; // Error message
+}
+
+/**
+ * void response
+ */
+message ErrorReportResponseProto {
+}
+
+/**
+ * registration - Information of the namenode registering with primary namenode
+ */
+message RegisterRequestProto {
+ required NamenodeRegistrationProto registration = 1; // Registration info
+}
+
+/**
+ * registration - Updated registration information of the newly registered
+ * namenode.
+ */
+message RegisterResponseProto {
+ required NamenodeRegistrationProto registration = 1; // Registration info
+}
+
+/**
+ * Start checkpoint request
+ * registration - Namenode that is starting the checkpoint
+ */
+message StartCheckpointRequestProto {
+ required NamenodeRegistrationProto registration = 1; // Registration info
+}
+
+/**
+ * command - Command returned by the active namenode to be
+ * handled by the caller.
+ */
+message StartCheckpointResponseProto {
+ required NamenodeCommandProto command = 1;
+}
+
+/**
+ * End or finalize the previously started checkpoint
+ * registration - Namenode that is ending the checkpoint
+ * signature - unique token to identify checkpoint transaction,
+ * that was received when checkpoint was started.
+ */
+message EndCheckpointRequestProto {
+ required NamenodeRegistrationProto registration = 1; // Registration info
+ required CheckpointSignatureProto signature = 2;
+}
+
+/**
+ * void response
+ */
+message EndCheckpointResponseProto {
+}
+
+/**
+ * sinceTxId - return the editlog information for transactions >= sinceTxId
+ */
+message GetEditLogManifestRequestProto {
+ required uint64 sinceTxId = 1; // Transaction ID
+}
+
+/**
+ * manifest - Enumeration of editlogs from namenode for
+ * logs >= sinceTxId in the request
+ */
+message GetEditLogManifestResponseProto {
+ required RemoteEditLogManifestProto manifest = 1;
+}
+
+/**
+ * Protocol used by a subordinate namenode to send requests to
+ * the active/primary namenode.
+ *
+ * See the request and response for details of rpc call.
+ */
+service NamenodeProtocolService {
+ /**
+ * Get a list of blocks for a given datanode whose total
+ * length adds up to the given size.
+ */
+ rpc getBlocks(GetBlocksRequestProto) returns(GetBlocksResponseProto);
+
+ /**
+ * Get the current block keys
+ */
+ rpc getBlockKeys(GetBlockKeysRequestProto) returns(GetBlockKeysResponseProto);
+
+ /**
+ * Get the transaction ID of the most recently persisted editlog record
+ */
+ rpc getTransationId(GetTransactionIdRequestProto)
+ returns(GetTransactionIdResponseProto);
+
+ /**
+ * Close the current editlog and open a new one for checkpointing purposes
+ */
+ rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
+
+ /**
+ * Report an error from a subordinate namenode to the active namenode.
+ * The active namenode may decide to unregister the reporting namenode
+ * depending on the error.
+ */
+ rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);
+
+ /**
+ * Request to register a subordinate namenode
+ */
+ rpc register(RegisterRequestProto) returns(RegisterResponseProto);
+
+ /**
+ * Request to start a checkpoint.
+ */ + rpc startCheckpoint(StartCheckpointRequestProto) + returns(StartCheckpointResponseProto); + + /** + * End of finalize the previously started checkpoint + */ + rpc endCheckpoint(EndCheckpointRequestProto) + returns(EndCheckpointResponseProto); + + /** + * Get editlog manifests from the active namenode for all the editlogs + */ + rpc getEditLogManifest(GetEditLogManifestRequestProto) + returns(GetEditLogManifestResponseProto); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto index a77a7c312e..8b86066980 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto @@ -53,6 +53,12 @@ message DatanodeIDProto { required uint32 ipcPort = 4; // the port where the ipc Server is running } +/** + * DatanodeID array + */ +message DatanodeIDsProto { + repeated DatanodeIDProto datanodes = 1; +} /** * The status of a Datanode @@ -76,7 +82,6 @@ message DatanodeInfoProto { optional AdminState adminState = 10; } - /** * Summary of a file or directory */ @@ -152,10 +157,10 @@ message HdfsFileStatusProto { required string group = 6; required uint64 modification_time = 7; required uint64 access_time = 8; - // + // Optional fields for symlink - optional bytes symlink = 9; // if symlink, target encoded java UTF8 - // + optional bytes symlink = 9; // if symlink, target encoded java UTF8 + // Optional fields for file optional uint32 block_replication = 10; // Actually a short - only 16bits used optional uint64 blocksize = 11; @@ -169,7 +174,7 @@ message FsServerDefaultsProto { required uint64 blockSize = 1; required uint32 bytesPerChecksum = 2; required uint32 writePacketSize = 3; - required uint32 replication = 4; // Actually a short - only 16bits used + required uint32 replication = 4; // Actually a short - only 16 bits used required uint32 fileBufferSize = 5; } @@ -187,5 +192,156 @@ message DirectoryListingProto { */ message UpgradeStatusReportProto { required uint32 version = 1;; - required uint32 upgradeStatus = 2; // Between 0 and 100 indicating the % complete + required uint32 upgradeStatus = 2; // % completed in range 0 & 100 } + +/** + * Common node information shared by all the nodes in the cluster + */ +message StorageInfoProto { + required uint32 layoutVersion = 1; // Layout version of the file system + required uint32 namespceID = 2; // File system namespace ID + required string clusterID = 3; // ID of the cluster + required uint64 cTime = 4; // File system creation time +} + +/** + * Information sent by a namenode to identify itself to the primary namenode. + */ +message NamenodeRegistrationProto { + required string rpcAddress = 1; // host:port of the namenode RPC address + required string httpAddress = 2; // host:port of the namenode http server + enum NamenodeRoleProto { + NAMENODE = 1; + BACKUP = 2; + CHECKPOINT = 3; + } + required StorageInfoProto storageInfo = 3; // Node information + optional NamenodeRoleProto role = 4; // Namenode role +} + +/** + * Unique signature to identify checkpoint transactions. + */ +message CheckpointSignatureProto { + required string blockPoolId = 1; + required uint64 mostRecentCheckpointTxId = 2; + required uint64 curSegmentTxId = 3; + required StorageInfoProto storageInfo = 4; +} + +/** + * Command sent from one namenode to another namenode. 
+ */
+message NamenodeCommandProto {
+ enum Type {
+ NamenodeCommand = 0; // Base command
+ CheckPointCommand = 1; // Check point command
+ }
+ required uint32 action = 1;
+ required Type type = 2;
+ optional CheckpointCommandProto checkpointCmd = 3;
+}
+
+/**
+ * Command returned from the primary to the checkpointing namenode.
+ * This command carries the checkpoint signature that identifies the
+ * checkpoint transaction and is needed for further
+ * communication related to checkpointing.
+ */
+message CheckpointCommandProto {
+ // Unique signature to identify checkpoint transaction
+ required CheckpointSignatureProto signature = 1;
+
+ // If true, transfer the image back to the primary upon completion of the checkpoint
+ required bool needToReturnImage = 2;
+}
+
+/**
+ * Block information
+ */
+message BlockProto {
+ required uint64 blockId = 1;
+ required uint64 genStamp = 2;
+ optional uint64 numBytes = 3;
+}
+
+/**
+ * Block and the datanodes where it is located
+ */
+message BlockWithLocationsProto {
+ required BlockProto block = 1; // Block
+ repeated DatanodeIDProto datanodeIDs = 2; // Datanodes with replicas of the block
+}
+
+/**
+ * List of blocks with locations
+ */
+message BlocksWithLocationsProto {
+ repeated BlockWithLocationsProto blocks = 1;
+}
+
+/**
+ * Editlog information with available transactions
+ */
+message RemoteEditLogProto {
+ required uint64 startTxId = 1; // Starting available edit log transaction
+ required uint64 endTxId = 2; // Ending available edit log transaction
+}
+
+/**
+ * Enumeration of editlogs available on a remote namenode
+ */
+message RemoteEditLogManifestProto {
+ repeated RemoteEditLogProto logs = 1;
+}
+
+/**
+ * Namespace information that describes the namespace on a namenode
+ */
+message NamespaceInfoProto {
+ required string buildVersion = 1; // Software build version
+ required uint32 distUpgradeVersion = 2; // Distributed upgrade version
+ required string blockPoolID = 3; // block pool used by the namespace
+ required StorageInfoProto storageInfo = 4; // Node information
+}
+
+/**
+ * Block access token information
+ */
+message BlockKeyProto {
+ required uint32 keyId = 1; // Key identifier
+ required uint64 expiryDate = 2; // Expiry time in milliseconds
+ required bytes keyBytes = 3; // Key secret
+}
+
+/**
+ * Current key and set of block keys at the namenode.
+ */ +message ExportedBlockKeysProto { + required bool isBlockTokenEnabled = 1; + required uint64 keyUpdateInterval = 2; + required uint64 tokenLifeTime = 3; + required BlockKeyProto currentKey = 4; + repeated BlockKeyProto allKeys = 5; +} + +/** + * State of a block replica at a datanode + */ +enum ReplicaState { + FINALIZED = 0; // State of a replica when it is not modified + RBW = 1; // State of replica that is being written to + RWR = 2; // State of replica that is waiting to be recovered + RUR = 3; // State of replica that is under recovery + TEMPORARY = 4; // State of replica that is created for replication +} + +/** + * Block that needs to be recovered with at a given location + */ +message RecoveringBlockProto { + required uint64 newGenStamp = 1; // New genstamp post recovery + required LocatedBlockProto block = 2; // Block to be recovered +} + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj index c762e32385..e69de29bb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.datanode; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -/** - * This aspect takes care about faults injected into datanode.DataXceiver - * class - */ -privileged public aspect DataXceiverAspects { - public static final Log LOG = LogFactory.getLog(DataXceiverAspects.class); - - pointcut runXceiverThread(DataXceiver xceiver) : - execution (* run(..)) && target(xceiver); - - void around (DataXceiver xceiver) : runXceiverThread(xceiver) { - if ("true".equals(System.getProperty("fi.enabledOOM"))) { - LOG.info("fi.enabledOOM is enabled"); - throw new OutOfMemoryError("Pretend there's no more memory"); - } else { - proceed(xceiver); - } - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java index b7e6277501..fcad32e0b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction; import org.apache.hadoop.fi.FiTestUtil; import org.apache.hadoop.fi.FiTestUtil.Action; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -56,8 +57,9 @@ public class TestFiDataTransferProtocol { static private FSDataOutputStream createFile(FileSystem fs, Path p ) throws IOException { - return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096), - REPLICATION, BLOCKSIZE); + return fs.create(p, true, + fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, + 4096), REPLICATION, BLOCKSIZE); } { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java index dcfdcf9e26..5832bf0491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction; import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction; import org.apache.hadoop.fi.FiTestUtil; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -65,8 +66,8 @@ public class TestFiDataTransferProtocol2 { static private FSDataOutputStream createFile(FileSystem fs, Path p ) throws IOException { - return fs.create(p, true, fs.getConf().getInt("io.file.buffer.size", 4096), - REPLICATION, BLOCKSIZE); + return fs.create(p, true, fs.getConf() + .getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE); } { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java index 2f92fcf6ec..e69de29bb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.datanode; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.io.InputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.net.SocketAddress; -import java.util.concurrent.CountDownLatch; - -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * This is a test for DataXceiverServer when DataXceiver thread spawning is - * failed due to OutOfMemoryError. Expected behavior is that DataXceiverServer - * should not be exited. 
It should retry again after 30 seconds - */ -public class TestFiDataXceiverServer { - - @Test(timeout = 30000) - public void testOutOfMemoryErrorInDataXceiverServerRun() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - ServerSocket sock = new ServerSocket() { - @Override - public Socket accept() throws IOException { - return new Socket() { - @Override - public InetAddress getInetAddress() { - return super.getLocalAddress(); - } - - @Override - public SocketAddress getRemoteSocketAddress() { - return new InetSocketAddress(8080); - } - - @Override - public SocketAddress getLocalSocketAddress() { - return new InetSocketAddress(0); - } - - @Override - public synchronized void close() throws IOException { - latch.countDown(); - super.close(); - } - - @Override - public InputStream getInputStream() throws IOException { - return null; - } - }; - } - }; - Thread thread = null; - System.setProperty("fi.enabledOOM", "true"); - DataNode dn = Mockito.mock(DataNode.class); - try { - Configuration conf = new Configuration(); - Mockito.doReturn(conf).when(dn).getConf(); - dn.shouldRun = true; - DataXceiverServer server = new DataXceiverServer(sock, conf, dn); - thread = new Thread(server); - thread.start(); - latch.await(); - assertTrue("Not running the thread", thread.isAlive()); - } finally { - System.setProperty("fi.enabledOOM", "false"); - dn.shouldRun = false; - if (null != thread) - thread.interrupt(); - sock.close(); - } - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestHelperDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestHelperDFS.java index fc3567ea5d..1dfa2952cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestHelperDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestHelperDFS.java @@ -18,7 +18,6 @@ package org.apache.hadoop.cli; import org.apache.hadoop.cli.util.CLICommandDFSAdmin; -import org.apache.hadoop.cli.util.CLITestCmd; import org.xml.sax.SAXException; public class CLITestHelperDFS extends CLITestHelper { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java index eb62f4063d..4bf16d7ea4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java @@ -29,7 +29,6 @@ import org.junit.After; import static org.junit.Assert.assertTrue; import org.junit.Before; -import org.junit.Test; public class TestHDFSCLI extends CLITestHelperDFS { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java index 617b90026c..3f74789ae9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java @@ -20,6 +20,9 @@ import java.io.*; import java.net.URI; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.log4j.Level; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileContext; @@ -28,9 +31,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; 
import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import static org.apache.hadoop.fs.FileContextTestHelper.*; import org.apache.hadoop.ipc.RemoteException; + import static org.junit.Assert.*; import org.junit.Test; import org.junit.BeforeClass; @@ -41,6 +46,10 @@ */ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest { + { + ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); + } + private static MiniDFSCluster cluster; protected String getScheme() { @@ -250,8 +259,8 @@ public void testLinkOwner() throws IOException { Path link = new Path(testBaseDir1(), "symlinkToFile"); createAndWriteFile(file); fc.createSymlink(file, link, false); - FileStatus stat_file = fc.getFileStatus(file); - FileStatus stat_link = fc.getFileStatus(link); - assertEquals(stat_link.getOwner(), stat_file.getOwner()); + FileStatus statFile = fc.getFileStatus(file); + FileStatus statLink = fc.getFileStatus(link); + assertEquals(statLink.getOwner(), statFile.getOwner()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java index fb562d14b2..593134350d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java @@ -163,7 +163,7 @@ public void testGeneralSBBehavior() throws IOException, InterruptedException { try { Configuration conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); FileSystem hdfs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java index 5369a9e2e4..384cfe75b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java @@ -143,8 +143,8 @@ static byte[] initBuffer(int size) { public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl) throws IOException { return fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short) repl, (long) BLOCK_SIZE); + fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, BLOCK_SIZE); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 3c338e56f5..9d4f4a2e19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.net.NetUtils; @@ -148,7 +149,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR sock, targetAddr.toString()+ ":" + block.getBlockId(), block, testBlock.getBlockToken(), offset, lenToRead, - conf.getInt("io.file.buffer.size", 4096), + conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), true, ""); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index c7566d2c62..409dd37525 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -84,6 +84,7 @@ public class DFSTestUtil { private int maxLevels;// = 3; private int maxSize;// = 8*1024; + private int minSize = 1; private int nFiles; private MyFile[] files; @@ -139,7 +140,7 @@ private class MyFile { long fidx = -1; while (fidx < 0) { fidx = gen.nextLong(); } name = name + Long.toString(fidx); - size = gen.nextInt(maxSize); + size = minSize + gen.nextInt(maxSize - minSize); seed = gen.nextLong(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index 7f2c1aecd6..d837c0f71a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.junit.AfterClass; @@ -66,7 +67,7 @@ public static void startUp () throws IOException { } @AfterClass - public static void tearDown() throws IOException { + public static void tearDown() { cluster.shutdown(); } @@ -91,7 +92,7 @@ public void testAppend() throws IOException { new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2); LOG.info("Creating file " + p); FSDataOutputStream out = fs.create(p, false, - conf.getInt("io.file.buffer.size", 4096), + conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCK_SIZE); out.write(contents, 0, oldFileLen); out.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 8f5f9f8fda..df913b37d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -36,21 +36,22 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.commons.math.stat.descriptive.rank.Min; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import org.apache.hadoop.hdfs.protocol.Block; import 
org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol; +import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; @@ -323,8 +324,8 @@ public MiniDFSCluster() { * Servers will be started on free ports. *

* The caller must manage the creation of NameNode and DataNode directories - * and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and - * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf. + * and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and + * {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf. * * @param conf the base configuration to use in starting the servers. This * will be modified as necessary. @@ -398,8 +399,8 @@ public MiniDFSCluster(Configuration conf, * @param format if true, format the NameNode and DataNodes before starting * up * @param manageDfsDirs if true, the data directories for servers will be - * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and - * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in + * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and + * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in * the conf * @param operation the operation with which to start the servers. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. @@ -430,8 +431,8 @@ public MiniDFSCluster(int nameNodePort, * @param numDataNodes Number of DataNodes to start; may be zero * @param format if true, format the NameNode and DataNodes before starting up * @param manageDfsDirs if true, the data directories for servers will be - * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and - * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in + * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and + * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in * the conf * @param operation the operation with which to start the servers. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. @@ -464,11 +465,11 @@ public MiniDFSCluster(int nameNodePort, * @param numDataNodes Number of DataNodes to start; may be zero * @param format if true, format the NameNode and DataNodes before starting up * @param manageNameDfsDirs if true, the data directories for servers will be - * created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and - * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in + * created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and + * {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in * the conf * @param manageDataDfsDirs if true, the data directories for datanodes will - * be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} + * be created and {@link #DFS_DATANODE_DATA_DIR_KEY} * set to same in the conf * @param operation the operation with which to start the servers. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. 
@@ -513,6 +514,8 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort, try { Class rpcEngine = conf.getClassByName(rpcEngineName); setRpcEngine(conf, NamenodeProtocols.class, rpcEngine); + setRpcEngine(conf, ClientNamenodeWireProtocol.class, rpcEngine); + setRpcEngine(conf, ClientDatanodeWireProtocol.class, rpcEngine); setRpcEngine(conf, NamenodeProtocol.class, rpcEngine); setRpcEngine(conf, ClientProtocol.class, rpcEngine); setRpcEngine(conf, DatanodeProtocol.class, rpcEngine); @@ -524,15 +527,15 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort, } // disable service authorization, as it does not work with tunnelled RPC - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, false); } - int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3); - conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, Math.min(replication, numDataNodes)); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second - conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + int replication = conf.getInt(DFS_REPLICATION_KEY, 3); + conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes)); + conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); + conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second + conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); Collection nameserviceIds = DFSUtil.getNameServiceIds(conf); @@ -540,8 +543,8 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort, federation = true; if (!federation) { - conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort); - conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + conf.set(FS_DEFAULT_NAME_KEY, "127.0.0.1:" + nameNodePort); + conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + nameNodeHttpPort); NameNode nn = createNameNode(0, conf, numDataNodes, manageNameDfsDirs, format, operation, clusterId); @@ -585,7 +588,7 @@ private static void initFederationConf(Configuration conf, initFederatedNamenodeAddress(conf, nameserviceId, nnPort); nnPort = nnPort == 0 ? 
0 : nnPort + 2; } - conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIdList); + conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIdList); } /* For federated namenode initialize the address:port */ @@ -593,11 +596,11 @@ private static void initFederatedNamenodeAddress(Configuration conf, String nameserviceId, int nnPort) { // Set nameserviceId specific key String key = DFSUtil.addKeySuffixes( - DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId); + DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId); conf.set(key, "127.0.0.1:0"); key = DFSUtil.addKeySuffixes( - DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId); + DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId); conf.set(key, "127.0.0.1:" + nnPort); } @@ -618,10 +621,10 @@ private NameNode createNameNode(int nnIndex, Configuration conf, StartupOption operation, String clusterId) throws IOException { if (manageNameDfsDirs) { - conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + conf.set(DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+ fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2)))); - conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, + conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+ fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2)))); } @@ -646,17 +649,17 @@ private void createFederatedNameNode(int nnIndex, Configuration conf, int numDataNodes, boolean manageNameDfsDirs, boolean format, StartupOption operation, String clusterId, String nameserviceId) throws IOException { - conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId); + conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId); NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs, format, operation, clusterId); conf.set(DFSUtil.addKeySuffixes( - DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode + DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode .getHostPortString(nn.getNameNodeAddress())); conf.set(DFSUtil.addKeySuffixes( - DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode + DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode .getHostPortString(nn.getHttpAddress())); DFSUtil.setGenericConf(conf, nameserviceId, - DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + DFS_NAMENODE_HTTP_ADDRESS_KEY); nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf)); } @@ -736,7 +739,7 @@ public void waitClusterUp() { * will be modified as necessary. * @param numDataNodes Number of DataNodes to start; may be zero * @param manageDfsDirs if true, the data directories for DataNodes will be - * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set + * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set * in the conf * @param operation the operation with which to start the DataNodes. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. @@ -768,7 +771,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, * will be modified as necessary. * @param numDataNodes Number of DataNodes to start; may be zero * @param manageDfsDirs if true, the data directories for DataNodes will be - * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be + * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be * set in the conf * @param operation the operation with which to start the DataNodes. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. 
@@ -802,7 +805,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, * will be modified as necessary. * @param numDataNodes Number of DataNodes to start; may be zero * @param manageDfsDirs if true, the data directories for DataNodes will be - * created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be + * created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be * set in the conf * @param operation the operation with which to start the DataNodes. If null * or StartupOption.FORMAT, then StartupOption.REGULAR will be used. @@ -820,12 +823,12 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig) throws IOException { - conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1"); + conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1"); int curDatanodesNum = dataNodes.size(); // for mincluster's the default initialDelay for BRs is 0 - if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) { - conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0); + if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) { + conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0); } // If minicluster's name node is null assume that the conf has been // set with the right address:port of the name node. @@ -872,8 +875,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, + i + ": " + dir1 + " or " + dir2); } String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2); - dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs); - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs); + dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs); + conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs); } if (simulatedCapacities != null) { dnConf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); @@ -902,7 +905,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf); if(dn == null) throw new IOException("Cannot start DataNode in " - + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)); + + dnConf.get(DFS_DATANODE_DATA_DIR_KEY)); //since the HDFS does things based on IP:port, we need to add the mapping //for IP:port to rackId String ipAddr = dn.getSelfAddr().getAddress().getHostAddress(); @@ -1318,7 +1321,7 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop, Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getSelfAddr(); - conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":" + conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); } dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf), @@ -1445,10 +1448,10 @@ public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException { /** * @return a http URL */ - public String getHttpUri(int nnIndex) throws IOException { + public String getHttpUri(int nnIndex) { return "http://" + nameNodes[nnIndex].conf - .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + .get(DFS_NAMENODE_HTTP_ADDRESS_KEY); } /** @@ -1457,7 +1460,7 @@ public String getHttpUri(int nnIndex) throws IOException { public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException { String uri = "hftp://" + nameNodes[nnIndex].conf - .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + .get(DFS_NAMENODE_HTTP_ADDRESS_KEY); try { return 
(HftpFileSystem)FileSystem.get(new URI(uri), conf); } catch (URISyntaxException e) { @@ -1907,9 +1910,9 @@ public NameNode addNameNode(Configuration conf, int namenodePort) nameNodes = newlist; String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1); - String nameserviceIds = conf.get(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES); + String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES); nameserviceIds += "," + nameserviceId; - conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nameserviceIds); + conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds); initFederatedNamenodeAddress(conf, nameserviceId, namenodePort); createFederatedNameNode(nnIndex, conf, numDataNodes, true, true, null, @@ -1942,28 +1945,28 @@ private int getFreeSocketPort() { private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile, boolean checkDataNodeAddrConfig) throws IOException { if (setupHostsFile) { - String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim(); + String hostsFile = conf.get(DFS_HOSTS, "").trim(); if (hostsFile.length() == 0) { throw new IOException("Parameter dfs.hosts is not setup in conf"); } // Setup datanode in the include file, if it is defined in the conf String address = "127.0.0.1:" + getFreeSocketPort(); if (checkDataNodeAddrConfig) { - conf.setIfUnset("dfs.datanode.address", address); + conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address); } else { - conf.set("dfs.datanode.address", address); + conf.set(DFS_DATANODE_ADDRESS_KEY, address); } addToFile(hostsFile, address); LOG.info("Adding datanode " + address + " to hosts file " + hostsFile); } else { if (checkDataNodeAddrConfig) { - conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0"); - conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0"); - conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0"); + conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); + conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } else { - conf.set("dfs.datanode.address", "127.0.0.1:0"); - conf.set("dfs.datanode.http.address", "127.0.0.1:0"); - conf.set("dfs.datanode.ipc.address", "127.0.0.1:0"); + conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index afb58c4555..117952a7e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -25,7 +25,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java index 64e8588790..be6e741a0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java @@ -24,6 +24,7 @@ import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; @@ -80,9 +81,9 @@ public void testBlockMissingException() throws Exception { // private void createOldFile(FileSystem fileSys, Path name, int repl, int numBlocks, long blocksize) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, blocksize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, blocksize); // fill data into file final byte[] b = new byte[(int)blocksize]; for (int i = 0; i < numBlocks; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index e7988f99bb..259d26a304 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.io.IOUtils; @@ -41,7 +40,7 @@ public class TestClientProtocolForPipelineRecovery { @Test public void testGetNewStamp() throws IOException { int numDataNodes = 1; Configuration conf = new HdfsConfiguration(); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); try { cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index 97387ccfc1..33ee5c95a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -80,7 +81,7 @@ public void startUpCluster() throws IOException { .build(); cluster.waitActive(); dfs = (DistributedFileSystem) cluster.getFileSystem(); - buffersize = conf.getInt("io.file.buffer.size", 4096); + buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index 136a72205c..6673bf547b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -20,8 +20,6 @@ import java.net.InetSocketAddress; import java.net.Socket; import java.io.IOException; -import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,11 +29,11 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.SocketCache; -import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.security.token.Token; import org.junit.Test; @@ -212,6 +210,7 @@ public void testReadFromOneDN() throws IOException { MockGetBlockReader answer = new MockGetBlockReader(); Mockito.doAnswer(answer).when(in).getBlockReader( (InetSocketAddress) Matchers.anyObject(), + (DatanodeInfo) Matchers.anyObject(), Matchers.anyString(), (ExtendedBlock) Matchers.anyObject(), (Token) Matchers.anyObject(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index 1407fd46a0..0f0caa673b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -26,11 +26,11 @@ import java.io.IOException; import java.util.ArrayList; -import junit.framework.Assert; import junit.framework.TestCase; -import java.net.InetSocketAddress; - import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; @@ -65,9 +65,9 @@ public void testDFSAddressConfig() throws IOException { assertNotNull("Should have been able to stop simulated datanode", dnp); } - conf.unset("dfs.datanode.address"); - conf.unset("dfs.datanode.http.address"); - conf.unset("dfs.datanode.ipc.address"); + conf.unset(DFS_DATANODE_ADDRESS_KEY); + conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY); + conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY); cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true); @@ -90,9 +90,9 @@ public void testDFSAddressConfig() throws IOException { assertNotNull("Should have been able to stop simulated datanode", dnp); } - conf.set("dfs.datanode.address","0.0.0.0:0"); - conf.set("dfs.datanode.http.address","0.0.0.0:0"); - conf.set("dfs.datanode.ipc.address","0.0.0.0:0"); + conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0"); cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 9cc1b2999c..af12badddb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -43,6 +43,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileSystem; @@ -58,10 +59,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -144,7 +142,7 @@ public void testWriteTimeoutAtDataNode() throws IOException, conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1); // set a small buffer size final int bufferSize = 4096; - conf.setInt("io.file.buffer.size", bufferSize); + conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 495e8e191a..38a837247a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -22,14 +22,13 @@ import java.util.Map; import java.util.Random; -import javax.security.auth.login.LoginException; - import junit.framework.AssertionFailedError; import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -202,7 +201,7 @@ private void create(OpType op, Path name, short umask, switch (op) { case CREATE: FSDataOutputStream out = fs.create(name, permission, true, - conf.getInt("io.file.buffer.size", 4096), + conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); out.close(); break; @@ -520,8 +519,7 @@ protected void set(Path path, short ancestorPermission, } /* Perform an operation and verify if the permission checking is correct */ - void verifyPermission(UserGroupInformation ugi) throws LoginException, - IOException { + void verifyPermission(UserGroupInformation ugi) throws IOException { if (this.ugi != ugi) { setRequiredPermissions(ugi); this.ugi = ugi; @@ -564,8 +562,7 @@ protected boolean expectPermissionDeny() { } /* Set the permissions required to pass the permission checking */ - protected void setRequiredPermissions(UserGroupInformation 
ugi) - throws IOException { + protected void setRequiredPermissions(UserGroupInformation ugi) { if (SUPERUSER.equals(ugi)) { requiredAncestorPermission = SUPER_MASK; requiredParentPermission = SUPER_MASK; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index a8f814b652..b07bad252e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -30,7 +30,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 6ad08cd2aa..567fbabddd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; -import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 4333f1c0f9..089ab4d837 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -31,7 +31,6 @@ import junit.framework.TestCase; -import org.apache.commons.digester.SetRootRule; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -209,7 +208,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n @Test public void testOpWrite() throws IOException { int numDataNodes = 1; Configuration conf = new HdfsConfiguration(); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); try { cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index b061f26783..e271bb95a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -25,6 +25,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -123,9 +124,9 @@ public synchronized long getStamp() { static private FSDataOutputStream createFile(FileSystem fileSys, Path name, short repl) throws IOException { // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - repl, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, + blockSize); return stm; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 3069727a48..faf7efd536 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -115,9 +116,9 @@ private void writeConfigFile(Path name, ArrayList nodes) private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); @@ -246,7 +247,7 @@ private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException * Wait till node is fully decommissioned. 
*/ private void waitNodeState(DatanodeInfo node, - AdminStates state) throws IOException { + AdminStates state) { boolean done = state == node.getAdminState(); while (!done) { LOG.info("Waiting for node " + node + " to change state to " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java index 474b2f5086..518adddf10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java @@ -20,9 +20,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.conf.Configuration; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.log4j.Level; - import junit.framework.TestCase; public class TestDeprecatedKeys extends TestCase { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index aaa085f1e7..4055cd8d3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; @@ -88,17 +89,17 @@ public void testFileSystemCloseAll() throws Exception { @Test public void testDFSClose() throws Exception { Configuration conf = getTestConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - FileSystem fileSys = cluster.getFileSystem(); - + MiniDFSCluster cluster = null; try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + FileSystem fileSys = cluster.getFileSystem(); + // create two files fileSys.create(new Path("/test/dfsclose/file-0")); fileSys.create(new Path("/test/dfsclose/file-1")); fileSys.close(); - } - finally { + } finally { if (cluster != null) {cluster.shutdown();} } } @@ -106,10 +107,10 @@ public void testDFSClose() throws Exception { @Test public void testDFSSeekExceptions() throws IOException { Configuration conf = getTestConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); - FileSystem fileSys = cluster.getFileSystem(); - + MiniDFSCluster cluster = null; try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); + FileSystem fileSys = cluster.getFileSystem(); String file = "/test/fileclosethenseek/file-0"; Path path = new Path(file); // create file @@ -455,7 +456,7 @@ public FileSystem run() throws Exception { final Path dir = new Path("/filechecksum"); final int block_size = 1024; - final int buffer_size = conf.getInt("io.file.buffer.size", 4096); + final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512); //try different number of blocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java 
index 335d8a58ac..fea024c2c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java @@ -21,14 +21,13 @@ import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; -import java.util.EnumSet; import java.util.Random; import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; -import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -56,7 +55,7 @@ public class TestFSInputChecker extends TestCase { private void writeFile(FileSystem fileSys, Path name) throws IOException { // create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777), - true, fileSys.getConf().getInt("io.file.buffer.size", 4096), + true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), NUM_OF_DATANODES, BLOCK_SIZE, null); stm.write(expected); stm.close(); @@ -327,8 +326,10 @@ private void testSeekAndRead(FileSystem fileSys) throws IOException { Path file = new Path("try.dat"); writeFile(fileSys, file); - stm = fileSys.open(file, - fileSys.getConf().getInt("io.file.buffer.size", 4096)); + stm = fileSys.open( + file, + fileSys.getConf().getInt( + CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096)); checkSeekAndRead(); stm.close(); cleanupFile(fileSys, file); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java index 8a18420aad..da18bbe0cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java @@ -21,6 +21,8 @@ import java.io.*; import java.util.Random; import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY; + import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -43,7 +45,7 @@ public class TestFSOutputSummer extends TestCase { /* create a file, write all data at once */ private void writeFile1(Path name) throws Exception { FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), + fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), NUM_OF_DATANODES, BLOCK_SIZE); stm.write(expected); stm.close(); @@ -54,7 +56,7 @@ private void writeFile1(Path name) throws Exception { /* create a file, write data chunk by chunk */ private void writeFile2(Path name) throws Exception { FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), + fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), NUM_OF_DATANODES, BLOCK_SIZE); int i=0; for( ;i token = lb.get(0).getBlockToken(); + final DatanodeInfo dnInfo = lb.get(0).getLocations()[0]; + ClientDatanodeProtocol proxy = aUgi + .doAs(new PrivilegedExceptionAction() { + @Override + public ClientDatanodeProtocol run() throws Exception { + return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, + 60000); + } + }); + + 
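The new TestShortCircuitLocalRead code above obtains a ClientDatanodeProtocol proxy inside UserGroupInformation.doAs, so the datanode sees the RPC as coming from a specific remote user. A minimal, self-contained sketch of that doAs pattern is below; the user name "alice", the path, and the printed message are illustrative and not part of the patch, and the sketch assumes the default FileSystem in the Configuration is reachable.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // "alice" is an illustrative remote user, not one used by the test.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    // Everything executed inside run() is performed as "alice"; the test
    // uses the same mechanism to call getBlockLocalPathInfo first as an
    // allowed user and then as a disallowed one.
    boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws Exception {
        FileSystem fs = FileSystem.get(conf);
        return fs.exists(new Path("/tmp"));
      }
    });
    System.out.println("/tmp visible to alice: " + exists);
  }
}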
//This should succeed + BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token); + Assert.assertEquals(dn.data.getBlockLocalPathInfo(blk).getBlockPath(), + blpi.getBlockPath()); + + // Now try with a not allowed user. + UserGroupInformation bUgi = UserGroupInformation + .createRemoteUser("notalloweduser"); + proxy = bUgi + .doAs(new PrivilegedExceptionAction() { + @Override + public ClientDatanodeProtocol run() throws Exception { + return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, + 60000); + } + }); + try { + proxy.getBlockLocalPathInfo(blk, token); + Assert.fail("The call should have failed as " + bUgi.getShortUserName() + + " is not allowed to call getBlockLocalPathInfo"); + } catch (IOException ex) { + Assert.assertTrue(ex.getMessage().contains( + "not allowed to call getBlockLocalPathInfo")); + } + } finally { + fs.close(); + cluster.shutdown(); + } + } + + /** + * Test to run benchmarks between shortcircuit read vs regular read with + * specified number of threads simultaneously reading. + *
+ * Run this using the following command: + * bin/hadoop --config confdir \ + * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \ + * + */ + public static void main(String[] args) throws Exception { + if (args.length != 3) { + System.out.println("Usage: test shortcircuit checksum threadCount"); + System.exit(1); + } + boolean shortcircuit = Boolean.valueOf(args[0]); + boolean checksum = Boolean.valueOf(args[1]); + int threadCount = Integer.valueOf(args[2]); + + // Setup create a file + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, + checksum); + + //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test + int fileSize = 1000 * blockSize + 100; // File with 1000 blocks + final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize); + + // create a new file in home directory. Do not close it. + final Path file1 = new Path("filelocal.dat"); + final FileSystem fs = FileSystem.get(conf); + FSDataOutputStream stm = createFile(fs, file1, 1); + + stm.write(dataToWrite); + stm.close(); + + long start = System.currentTimeMillis(); + final int iteration = 20; + Thread[] threads = new Thread[threadCount]; + for (int i = 0; i < threadCount; i++) { + threads[i] = new Thread() { + public void run() { + for (int i = 0; i < iteration; i++) { + try { + checkFileContent(fs, file1, dataToWrite, 0); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + }; + } + for (int i = 0; i < threadCount; i++) { + threads[i].start(); + } + for (int i = 0; i < threadCount; i++) { + threads[i].join(); + } + long end = System.currentTimeMillis(); + System.out.println("Iteration " + iteration + " took " + (end - start)); + fs.delete(file1, false); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java index 8fb2b7a38c..77c4007e4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java @@ -21,6 +21,7 @@ import java.io.*; import java.util.Random; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -40,9 +41,9 @@ public class TestSmallBlock extends TestCase { private void writeFile(FileSystem fileSys, Path name) throws IOException { // create and write a file that contains three blocks of data - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)1, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) 1, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); @@ -92,7 +93,7 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException { public void testSmallBlock() throws IOException { Configuration conf = new HdfsConfiguration(); if (simulatedStorage) { - conf.setBoolean("dfs.datanode.simulateddatastorage", true); + conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY, true); } 
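The main() benchmark added to TestShortCircuitLocalRead above times concurrent reads by starting a fixed number of threads, letting each repeat the same read, and measuring wall-clock time around start()/join(). A stripped-down sketch of that harness, with the per-iteration work reduced to a placeholder method, might look like this:

public class ReadBenchmarkSketch {
  public static void main(String[] args) throws InterruptedException {
    final int threadCount = args.length > 0 ? Integer.parseInt(args[0]) : 4;
    final int iterations = 20;
    Thread[] threads = new Thread[threadCount];
    long start = System.currentTimeMillis();
    for (int t = 0; t < threadCount; t++) {
      threads[t] = new Thread() {
        @Override
        public void run() {
          for (int i = 0; i < iterations; i++) {
            doOneRead();          // stand-in for checkFileContent(...)
          }
        }
      };
      threads[t].start();
    }
    for (Thread t : threads) {
      t.join();
    }
    long elapsed = System.currentTimeMillis() - start;
    System.out.println(threadCount + " threads x " + iterations
        + " iterations took " + elapsed + " ms");
  }

  private static void doOneRead() {
    // placeholder for the per-iteration read being timed
  }
}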
conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1"); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index 45f41dc7ef..6ca0ffe7b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java index ddef17ba0f..0a25ef7983 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -123,7 +124,7 @@ private boolean checkFile(byte[] fileToCheck) { private static FSDataOutputStream writeFile(FileSystem fileSys, Path name, short repl, long blockSize) throws IOException { FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt("io.file.buffer.size", 4096), repl, blockSize); + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, blockSize); return stm; } @@ -173,7 +174,7 @@ private static void tryRead(Configuration conf, LocatedBlock lblock, } // get a conf for testing - private static Configuration getConf(int numDataNodes) throws IOException { + private static Configuration getConf(int numDataNodes) { Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); @@ -181,7 +182,8 @@ private static Configuration getConf(int numDataNodes) throws IOException { conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes); conf.setInt("ipc.client.connect.max.retries", 0); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, + DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT); return conf; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java index 01018e8c05..df00728764 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java @@ 
-391,7 +391,7 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception { Path excludeFile = new Path(dir, "exclude"); assertTrue(localFileSys.mkdirs(dir)); DFSTestUtil.writeFile(localFileSys, excludeFile, ""); - conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath()); + conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath()); // Two blocks and four racks String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"}; @@ -441,7 +441,7 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() Path excludeFile = new Path(dir, "exclude"); assertTrue(localFileSys.mkdirs(dir)); DFSTestUtil.writeFile(localFileSys, excludeFile, ""); - conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath()); + conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath()); // All hosts are on two racks, only one host on /rack2 String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java index 359fa0245b..2bfcbdefd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java @@ -21,8 +21,6 @@ import java.io.IOException; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -38,9 +36,12 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; +import static org.junit.Assert.*; + /** */ -public class TestDistributedUpgrade extends TestCase { +public class TestDistributedUpgrade { private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class); private Configuration conf; private int testCounter = 0; @@ -95,6 +96,7 @@ void startDataNodeShouldFail(StartupOption operation) { /** */ + @Test(timeout=120000) public void testDistributedUpgrade() throws Exception { int numDirs = 1; TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index a3d47b623e..2e3bd92373 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -21,10 +21,7 @@ import java.io.IOException; -import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; -import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; /** * Utility class for accessing package-private DataNode information during tests. 
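TestDistributedUpgrade above drops the JUnit 3 TestCase base class in favour of JUnit 4 annotations, which is what makes the per-test timeout possible. A minimal sketch of the same migration on a hypothetical test class (the class name, method name, and assertion are illustrative):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TestUpgradeTimeoutSketch {   // illustrative class, not in the patch

  // With JUnit 3 the method had to be named testXxx and the class had to
  // extend TestCase. With JUnit 4 any public void method carrying @Test is
  // run, and the annotation can declare a timeout in milliseconds, which is
  // how the patch bounds testDistributedUpgrade() at 120000 ms.
  @Test(timeout = 120000)
  public void finishesWithinTwoMinutes() {
    int numDirs = 1;                       // placeholder subject
    assertEquals(1, numDirs);
  }
}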
@@ -41,27 +38,4 @@ public class DataNodeTestUtils { return dn.getDNRegistrationForBP(bpid); } - /** - * manually setup datanode to testing - * @param dn - datanode - * @param nsifno - namenode info - * @param bpid - block pool id - * @param nn - namenode object - * @throws IOException - */ - public static void setBPNamenodeByIndex(DataNode dn, - NamespaceInfo nsifno, String bpid, DatanodeProtocol nn) - throws IOException { - // setup the right BPOS.. - BPOfferService [] bposs = dn.getAllBpOs(); - if(bposs.length<0) { - throw new IOException("Datanode wasn't initializes with at least one NN"); - } - for(BPOfferService bpos : bposs) { - bpos.setNamespaceInfo(nsifno); - - dn.setBPNamenode(bpid, nn); - dn.initBlockPool(bpos, nsifno); - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index ac82322283..9dbcc2f736 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice; import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet; @@ -66,9 +66,9 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{ public static final String CONFIG_PROPERTY_SIMULATED = - "dfs.datanode.simulateddatastorage"; + DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY; public static final String CONFIG_PROPERTY_CAPACITY = - "dfs.datanode.simulateddatastorage.capacity"; + DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY; public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte public static final byte DEFAULT_DATABYTE = 9; // 1 terabyte @@ -136,7 +136,7 @@ synchronized public void setNumBytes(long length) { } } - synchronized SimulatedInputStream getIStream() throws IOException { + synchronized SimulatedInputStream getIStream() { if (!finalized) { // throw new IOException("Trying to read an unfinalized block"); return new SimulatedInputStream(oStream.getLength(), DEFAULT_DATABYTE); @@ -363,7 +363,7 @@ private SimulatedBPStorage getBPStorage(String bpid) throws IOException { private SimulatedStorage storage = null; private String storageId; - public SimulatedFSDataset(Configuration conf) throws IOException { + public SimulatedFSDataset(Configuration conf) { setConf(conf); } @@ -992,4 +992,10 @@ public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary) } return r; } + + @Override + public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) + throws IOException { + throw new IOException("getBlockLocalPathInfo not supported."); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java index 20d5dc6a70..b7a10177c1 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.junit.After; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index 04fa0ac87f..7b26f4e805 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java index b27f2efdc4..47bfa703db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.log4j.Level; import org.junit.After; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index 11e5f994fa..97554e7a80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -18,39 +18,41 @@ package org.apache.hadoop.hdfs.server.datanode; -import java.io.File; -import java.io.IOException; -import java.util.AbstractList; - +import java.net.InetSocketAddress; import static org.junit.Assert.fail; import static org.mockito.Mockito.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import 
org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.junit.Test; +import org.mockito.Mockito; public class TestDatanodeRegister { public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class); + + // Invalid address + static final InetSocketAddress INVALID_ADDR = + new InetSocketAddress("127.0.0.1", 1); + @Test public void testDataNodeRegister() throws Exception { - DataNode.BPOfferService myMockBPOS = mock(DataNode.BPOfferService.class); - doCallRealMethod().when(myMockBPOS).register(); - myMockBPOS.bpRegistration = mock(DatanodeRegistration.class); - when(myMockBPOS.bpRegistration.getStorageID()).thenReturn("myTestStorageID"); + DataNode mockDN = mock(DataNode.class); + Mockito.doReturn(true).when(mockDN).shouldRun(); + BPOfferService bpos = new BPOfferService(INVALID_ADDR, mockDN); + NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class); when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion"); DatanodeProtocol fakeDNProt = mock(DatanodeProtocol.class); when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo); - doCallRealMethod().when(myMockBPOS).setNameNode(fakeDNProt); - myMockBPOS.setNameNode( fakeDNProt ); + + bpos.setNameNode( fakeDNProt ); + bpos.bpNSInfo = fakeNSInfo; try { - myMockBPOS.register(); + bpos.retrieveNamespaceInfo(); fail("register() did not throw exception! " + "Expected: IncorrectVersionException"); } catch (IncorrectVersionException ie) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java index d0beaa2698..e6bd1ea8b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java @@ -73,7 +73,7 @@ public void testRbwReplicas() throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L); conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster.waitActive(); try { @@ -137,7 +137,7 @@ private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L); conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512); - conf.setBoolean("dfs.support.append", true); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java index 487adfe5bb..d50376aaff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java @@ -25,14 +25,8 @@ import java.net.InetSocketAddress; import java.net.SocketTimeoutException; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import 
org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; - -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index f84a7a3c6a..150f117840 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.junit.Test; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 6eef234bbf..65169fa011 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; @@ -99,9 +100,9 @@ public void setUp() throws IOException { static void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, blockSize); byte[] buffer = new byte[TestCheckpoint.fileSize]; Random rand = new Random(TestCheckpoint.seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java index 220bfd6a39..aed6787446 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java @@ -20,8 +20,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; - import java.io.File; import java.io.IOException; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java index b75f2ed35a..dacd03bbfc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; - import java.net.URL; import java.util.Collection; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index 0d2ea934e2..9934a6f534 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -28,6 +28,7 @@ import java.util.Random; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -117,7 +118,8 @@ private void writeFile(FileSystem fileSys, Path name, short repl) throws IOException { // create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt("io.file.buffer.size", 4096), repl, (long) blockSize); + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, + blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); @@ -129,7 +131,8 @@ private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name, short repl) throws IOException { // create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt("io.file.buffer.size", 4096), repl, (long) blockSize); + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, + blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 5c14ab3061..0d2479319e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java index 649c415287..500c5c3c69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java @@ -19,13 +19,8 @@ import static org.junit.Assert.*; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.spy; - import java.io.File; import java.io.IOException; -import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -34,10 +29,8 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName; import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName; -import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile; import org.junit.Test; -import org.mockito.Mockito; public class TestFSImageStorageInspector { private static final Log LOG = LogFactory.getLog( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index d2f9781bed..e4ff4bb732 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -21,21 +21,13 @@ import java.net.URI; import java.util.Collections; -import java.util.Arrays; import java.util.List; -import java.util.ArrayList; import java.util.Iterator; import java.io.RandomAccessFile; import java.io.File; import java.io.FilenameFilter; -import java.io.BufferedInputStream; -import java.io.DataInputStream; import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.SecurityUtil; import org.junit.Test; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; @@ -48,9 +40,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.base.Joiner; -import java.util.zip.CheckedInputStream; -import java.util.zip.Checksum; - public class TestFileJournalManager { /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java index 48ab6ce18e..0dfb5e31d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @@ -23,6 +23,7 @@ import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -44,9 +45,9 @@ public class TestFileLimit extends TestCase { // creates a zero file. 
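Several hunks in this part of the patch replace the bare "io.file.buffer.size" literal with CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY when a test helper creates a file. A small before/after sketch built only from calls that already appear in the patch; the path, replication and block size are illustrative, and the sketch assumes a writable default FileSystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BufferSizeKeySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Before: a typo in the literal silently falls back to the default.
    int oldStyle = conf.getInt("io.file.buffer.size", 4096);

    // After: the key is a compile-time constant, so every use can be found.
    int newStyle = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);

    // Same create(path, overwrite, bufferSize, replication, blockSize)
    // overload the test helpers use; "/tmp/example" is illustrative.
    FSDataOutputStream out = fs.create(new Path("/tmp/example"), true,
        newStyle, (short) 1, 64 * 1024 * 1024L);
    out.write(new byte[]{1, 2, 3});
    out.close();
    System.out.println("old=" + oldStyle + " new=" + newStyle);
  }
}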
private void createFile(FileSystem fileSys, Path name) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)1, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) 1, blockSize); byte[] buffer = new byte[1024]; Random rand = new Random(seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index a334de66b4..9ec5d95ba6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -29,6 +29,7 @@ import java.util.Random; import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,7 +51,8 @@ public class TestMetaSave { private void createFile(FileSystem fileSys, Path name) throws IOException { FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() - .getInt("io.file.buffer.size", 4096), (short) 2, (long) blockSize); + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) 2, blockSize); byte[] buffer = new byte[1024]; Random rand = new Random(seed); rand.nextBytes(buffer); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index b024bab1d7..5d0bd62247 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -59,9 +60,9 @@ protected void setUp() throws java.lang.Exception { private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, (long)BLOCK_SIZE); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, BLOCK_SIZE); byte[] buffer = new byte[FILE_SIZE]; Random rand = new Random(SEED); rand.nextBytes(buffer); @@ -96,7 +97,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl) int replication = fileSys.getFileStatus(name).getReplication(); assertEquals("replication for " + name, repl, replication); long size = fileSys.getContentSummary(name).getLength(); - assertEquals("file size for " + name, size, (long)FILE_SIZE); + assertEquals("file size for " + name, size, FILE_SIZE); } private void cleanupFile(FileSystem fileSys, Path name) diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index 13e256d78c..25a458b0c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -22,7 +22,6 @@ import static org.junit.Assert.*; import static org.mockito.Matchers.anyObject; -import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index c4e6377857..d4fd72d3b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; -import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 8948f7843e..ceb6261db4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -76,9 +77,9 @@ public class TestStartup extends TestCase { private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { - FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, (long)blockSize); + FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf() + .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short) repl, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.nextBytes(buffer); @@ -233,11 +234,13 @@ private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expecte sd = it.next(); if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) { - File imf = img.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0); + img.getStorage(); + File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0); LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize); assertEquals(expectedImgSize, 
imf.length()); } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) { - File edf = img.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0); + img.getStorage(); + File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0); LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize); assertEquals(expectedEditsSize, edf.length()); } else { @@ -342,8 +345,10 @@ public void testSNNStartup() throws IOException{ FSImage image = nn.getFSImage(); StorageDirectory sd = image.getStorage().getStorageDir(0); //only one assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS); - File imf = image.getStorage().getStorageFile(sd, NameNodeFile.IMAGE, 0); - File edf = image.getStorage().getStorageFile(sd, NameNodeFile.EDITS, 0); + image.getStorage(); + File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0); + image.getStorage(); + File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0); LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length()); LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java index e65b9009d5..3823822d0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java @@ -18,7 +18,6 @@ package org.apache.hadoop.security; import java.io.IOException; -import java.util.EnumSet; import java.util.Random; import org.apache.commons.logging.Log; @@ -125,7 +124,7 @@ public void testCreate() throws Exception { FsPermission filePerm = new FsPermission((short)0444); FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm, - true, conf.getInt("io.file.buffer.size", 4096), + true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); out.write(123); out.close(); @@ -224,7 +223,7 @@ public void testFilePermision() throws Exception { userfs.mkdirs(RENAME_PATH); assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1)); } finally { - if(cluster != null) cluster.shutdown(); + cluster.shutdown(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java index f3ab700e70..2b18efc71a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java @@ -25,6 +25,7 @@ import junit.framework.TestCase; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -51,8 +52,8 @@ public class TestJMXGet extends TestCase { private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException { FSDataOutputStream stm = fileSys.create(name, true, - fileSys.getConf().getInt("io.file.buffer.size", 4096), - (short)repl, (long)blockSize); + fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short)repl, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); 
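The TestPermission hunk above uses the create overload that also takes an FsPermission and a Progressable. A self-contained sketch of that call shape follows; the path and the 0444 mode are illustrative, and passing null skips progress reporting just as the test does:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateWithPermissionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // 0444: read-only for owner, group and others; the mode is illustrative.
    FsPermission filePerm = new FsPermission((short) 0444);

    // create(path, permission, overwrite, bufferSize, replication,
    // blockSize, progress) is the overload visible in the hunk; null means
    // no progress callbacks.
    FSDataOutputStream out = fs.create(new Path("/tmp/readonly.txt"), filePerm,
        true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
        fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
    out.write(123);   // writes a single byte, as the test itself does
    out.close();
  }
}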
rand.nextBytes(buffer); diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 8e4a8b3b45..2ee9c405ea 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -153,6 +153,12 @@ Release 0.23.1 - Unreleased MAPREDUCE-3407. Fixed pom files to refer to the correct MR app-jar needed by the integration tests. (Hitesh Shah via vinodkv) + MAPREDUCE-3434. Nightly build broken (Hitesh Shah via mahadev) + + MAPREDUCE-3447. mapreduce examples not working (mahadev) + + MAPREDUCE-3444. trunk/0.23 builds broken (Hitesh Shah via mahadev) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml index 16f02a3742..a8b4618268 100644 --- a/hadoop-mapreduce-project/build.xml +++ b/hadoop-mapreduce-project/build.xml @@ -867,7 +867,6 @@ - @@ -897,12 +896,10 @@ - - @@ -962,7 +959,6 @@ - @@ -987,7 +983,6 @@ - @@ -1026,7 +1021,6 @@ - @@ -1052,7 +1046,6 @@ - @@ -1792,10 +1785,6 @@ output="${build.dir.eclipse-contrib-classes}/raid/main" /> - - test-jar test + + org.apache.hadoop + hadoop-hdfs + test + test-jar + @@ -104,6 +110,10 @@ ${java.home} + + + ${project.build.directory}/${project.artifactId}-${project.version}-tests.jar + diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java similarity index 98% rename from hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java index 56f0ef6f9c..5bf4ff11b8 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java @@ -176,7 +176,7 @@ protected Path getTestRootDir() { * @return path to the input directory for the tescase. */ protected Path getInputDir() { - return new Path("input"); + return new Path("target/input"); } /** @@ -185,7 +185,7 @@ protected Path getInputDir() { * @return path to the output directory for the tescase. 
*/ protected Path getOutputDir() { - return new Path("output"); + return new Path("target/output"); } /** diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java similarity index 100% rename from hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml new file mode 100644 index 0000000000..48824df082 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -0,0 +1,63 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 0.24.0-SNAPSHOT + ../../hadoop-project + + org.apache.hadoop + hadoop-mapreduce-examples + 0.24.0-SNAPSHOT + Apache Hadoop MapReduce Examples + Apache Hadoop MapReduce Examples + jar + + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.hadoop.examples.ExampleDriver + + + + + + + diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/AggregateWordCount.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordCount.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/AggregateWordCount.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordCount.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordHistogram.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/AggregateWordHistogram.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/AggregateWordHistogram.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/BaileyBorweinPlouffe.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/DBCountPageView.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/ExampleDriver.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/ExampleDriver.java 
similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/ExampleDriver.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/ExampleDriver.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Grep.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Grep.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Grep.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Join.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Join.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Join.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Join.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/QuasiMonteCarlo.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/RandomTextWriter.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/RandomWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/RandomWriter.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/SecondarySort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/SecondarySort.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Sort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java similarity index 
100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/Sort.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordCount.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordCount.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordCount.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordCount.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordMean.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordMean.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordMedian.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordMedian.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordStandardDeviation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/WordStandardDeviation.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/OneSidedPentomino.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/OneSidedPentomino.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/OneSidedPentomino.java diff --git 
a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/Pentomino.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/Sudoku.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/package.html similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/package.html rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/package.html diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/puzzle1.dta similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/dancing/puzzle1.dta rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/puzzle1.dta diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/package.html similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/package.html rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/package.html diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Combinable.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Combinable.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Combinable.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Combinable.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Container.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Container.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Container.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Container.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/DistBbp.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/DistBbp.java rename to 
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/DistSum.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
similarity index 98%
rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/DistSum.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
index 22e65543f2..b365ba701d 100644
--- a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/DistSum.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
@@ -38,7 +38,6 @@
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.mapreduce.ClusterMetrics;
 import org.apache.hadoop.mapreduce.InputFormat;
@@ -53,6 +52,7 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskInputOutputContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

@@ -386,8 +386,11 @@ public static class MixMachine extends Machine {
     @Override
     public synchronized void init(Job job) throws IOException {
       final Configuration conf = job.getConfiguration();
-      if (cluster == null)
-        cluster = new Cluster(JobTracker.getAddress(conf), conf);
+      if (cluster == null) {
+        String jobTrackerStr = conf.get("mapreduce.jobtracker.address", "localhost:8012");
+        cluster = new Cluster(NetUtils.createSocketAddr(jobTrackerStr), conf);
+
+      }
       chooseMachine(conf).init(job);
     }

@@ -604,4 +607,4 @@ public int run(String[] args) throws Exception {
   public static void main(String[] args) throws Exception {
     System.exit(ToolRunner.run(null, new DistSum(), args));
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Parser.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/SummationWritable.java
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/SummationWritable.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/SummationWritable.java
diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/TaskResult.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/TaskResult.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/TaskResult.java
diff --git
a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Util.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/Util.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Bellard.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/LongLong.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Modular.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Modular.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Montgomery.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Montgomery.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Montgomery.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Summation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Summation.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/Summation.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Summation.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/package.html similarity index 100% rename from 
hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/math/package.html rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/package.html diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/pi/package.html rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/.gitignore diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/100TBTaskTime.png diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/1PBTaskTime.png diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/1TBTaskTime.png diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/500GBTaskTime.png diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex similarity index 100% 
rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/Yahoo2009.tex diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/2009-write-up/tera.bib diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/GenSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/GenSort.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/Random16.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Random16.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/Random16.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Random16.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraChecksum.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraChecksum.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraChecksum.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraGen.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraInputFormat.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraOutputFormat.java rename to 
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraScheduler.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraSort.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraValidate.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/TeraValidate.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraValidate.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Unsigned16.java similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/Unsigned16.java rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/Unsigned16.java diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py diff --git a/hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/package.html similarity index 100% rename from hadoop-mapreduce-project/src/examples/org/apache/hadoop/examples/terasort/package.html rename to hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/package.html diff --git a/hadoop-mapreduce-project/ivy.xml b/hadoop-mapreduce-project/ivy.xml index f886bfa3f5..ee57f1af0e 100644 --- a/hadoop-mapreduce-project/ivy.xml +++ b/hadoop-mapreduce-project/ivy.xml @@ -91,15 +91,14 @@ rev="${yarn.version}" conf="compile->default"/> + - - diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 9be19324c9..7939789e69 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -35,12 +35,13 @@ once ${basedir} - + hadoop-yarn - hadoop-mapreduce-client + 
hadoop-mapreduce-client + hadoop-mapreduce-examples - + com.google.protobuf @@ -106,7 +107,7 @@ - + org.slf4j slf4j-api @@ -166,9 +167,9 @@ clover 3.0.2 - + - + @@ -321,7 +322,7 @@ - + dist diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 0f183b61f0..a835f6a552 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -45,7 +45,7 @@ ${project.version} 1.0.3 - + ${project.build.directory}/test-dir ${test.build.dir} @@ -99,6 +99,51 @@ ${project.version} + + org.apache.hadoop + hadoop-mapreduce-client-core + ${project.version} + + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${project.version} + + + + org.apache.hadoop + hadoop-yarn-server-tests + ${project.version} + test-jar + + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${project.version} + test-jar + + + + org.apache.hadoop + hadoop-mapreduce-client-hs + ${project.version} + + + + org.apache.hadoop + hadoop-hdfs + ${project.version} + test-jar + + + + org.apache.hadoop + hadoop-mapreduce-examples + ${project.version} + + com.google.guava guava @@ -177,6 +222,11 @@ 1.8 + + org.mortbay.jetty + jetty-servlet-tester + 6.1.26 + tomcat jasper-compiler @@ -544,6 +594,7 @@ true ${basedir}/src/test/resources/krb5.conf + file:///dev/urandom **/Test*.java diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml new file mode 100644 index 0000000000..3e6ae3f438 --- /dev/null +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -0,0 +1,121 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 0.24.0-SNAPSHOT + ../../hadoop-project + + org.apache.hadoop + hadoop-streaming + 0.24.0-SNAPSHOT + Apache Hadoop MapReduce Streaming + Apache Hadoop MapReduce Streaming + jar + + + ${project.build.directory}/log + %regex[.*(TestStreamingBadRecords|TestStreamingCombiner|TestStreamingStatus|TestUlimit).*] + + + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.hadoop + hadoop-mapreduce-client-app + test + + + org.apache.hadoop + hadoop-mapreduce-client-hs + test + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + test + test-jar + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-common + test + test-jar + + + org.apache.hadoop + hadoop-hdfs + test + test-jar + + + org.apache.hadoop + hadoop-yarn-server-tests + test-jar + test + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + create-log-dir + process-test-resources + + run + + + + + + + + + + + + + + diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/AutoInputFormat.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/AutoInputFormat.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/AutoInputFormat.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/DumpTypedBytes.java rename to 
hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/DumpTypedBytes.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/HadoopStreaming.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/HadoopStreaming.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/JarBuilder.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/JarBuilder.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/LoadTypedBytes.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PathFinder.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PathFinder.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeCombiner.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeCombiner.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRunner.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRunner.java rename to 
hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRunner.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamInputFormat.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamInputFormat.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java similarity index 92% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java index 2da171aecf..3a0689d870 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java @@ -80,13 +80,13 @@ public class StreamJob implements Tool { protected static final Log LOG = LogFactory.getLog(StreamJob.class.getName()); final static String REDUCE_NONE = "NONE"; - + /** -----------Streaming CLI Implementation **/ - private CommandLineParser parser = new BasicParser(); + private CommandLineParser parser = new BasicParser(); private Options allOptions; - /**@deprecated use StreamJob() with ToolRunner or set the - * Configuration using {@link #setConf(Configuration)} and - * run with {@link #run(String[])}. + /**@deprecated use StreamJob() with ToolRunner or set the + * Configuration using {@link #setConf(Configuration)} and + * run with {@link #run(String[])}. 
*/ @Deprecated public StreamJob(String[] argv, boolean mayExit) { @@ -94,12 +94,12 @@ public StreamJob(String[] argv, boolean mayExit) { argv_ = argv; this.config_ = new Configuration(); } - + public StreamJob() { setupOptions(); this.config_ = new Configuration(); } - + @Override public Configuration getConf() { return config_; @@ -109,13 +109,13 @@ public Configuration getConf() { public void setConf(Configuration conf) { this.config_ = conf; } - + @Override public int run(String[] args) throws Exception { try { this.argv_ = args; init(); - + preProcessArgs(); parseArgv(); if (printUsage) { @@ -123,7 +123,7 @@ public int run(String[] args) throws Exception { return 0; } postProcessArgs(); - + setJobConf(); } catch (IllegalArgumentException ex) { //ignore, since log will already be printed @@ -133,13 +133,13 @@ public int run(String[] args) throws Exception { } return submitAndMonitorJob(); } - + /** * This method creates a streaming job from the given argument list. - * The created object can be used and/or submitted to a jobtracker for + * The created object can be used and/or submitted to a jobtracker for * execution by a job agent such as JobControl * @param argv the list args for creating a streaming job - * @return the created JobConf object + * @return the created JobConf object * @throws IOException */ static public JobConf createJob(String[] argv) throws IOException { @@ -154,7 +154,7 @@ static public JobConf createJob(String[] argv) throws IOException { } /** - * This is the method that actually + * This is the method that actually * intializes the job conf and submits the job * to the jobtracker * @throws IOException @@ -169,7 +169,7 @@ public int go() throws IOException { throw new IOException(ex.getMessage()); } } - + protected void init() { try { env_ = new Environment(); @@ -186,7 +186,7 @@ void preProcessArgs() { } void postProcessArgs() throws IOException { - + if (inputSpecs_.size() == 0) { fail("Required argument: -input "); } @@ -253,7 +253,7 @@ void parseArgv() { LOG.error(oe.getMessage()); exitUsage(argv_.length > 0 && "-info".equals(argv_[0])); } - + if (cmdLine != null) { detailedUsage_ = cmdLine.hasOption("info"); if (cmdLine.hasOption("help") || detailedUsage_) { @@ -263,21 +263,21 @@ void parseArgv() { verbose_ = cmdLine.hasOption("verbose"); background_ = cmdLine.hasOption("background"); debug_ = cmdLine.hasOption("debug")? 
debug_ + 1 : debug_; - + String[] values = cmdLine.getOptionValues("input"); if (values != null && values.length > 0) { for (String input : values) { inputSpecs_.add(input); } } - output_ = cmdLine.getOptionValue("output"); - - mapCmd_ = cmdLine.getOptionValue("mapper"); - comCmd_ = cmdLine.getOptionValue("combiner"); - redCmd_ = cmdLine.getOptionValue("reducer"); - + output_ = cmdLine.getOptionValue("output"); + + mapCmd_ = cmdLine.getOptionValue("mapper"); + comCmd_ = cmdLine.getOptionValue("combiner"); + redCmd_ = cmdLine.getOptionValue("reducer"); + lazyOutput_ = cmdLine.hasOption("lazyOutput"); - + values = cmdLine.getOptionValues("file"); if (values != null && values.length > 0) { LOG.warn("-file option is deprecated, please use generic option" + @@ -306,34 +306,34 @@ void parseArgv() { LOG.warn("-dfs option is deprecated, please use -fs instead."); config_.set("fs.default.name", fsName); } - - additionalConfSpec_ = cmdLine.getOptionValue("additionalconfspec"); - inputFormatSpec_ = cmdLine.getOptionValue("inputformat"); + + additionalConfSpec_ = cmdLine.getOptionValue("additionalconfspec"); + inputFormatSpec_ = cmdLine.getOptionValue("inputformat"); outputFormatSpec_ = cmdLine.getOptionValue("outputformat"); - numReduceTasksSpec_ = cmdLine.getOptionValue("numReduceTasks"); + numReduceTasksSpec_ = cmdLine.getOptionValue("numReduceTasks"); partitionerSpec_ = cmdLine.getOptionValue("partitioner"); - inReaderSpec_ = cmdLine.getOptionValue("inputreader"); - mapDebugSpec_ = cmdLine.getOptionValue("mapdebug"); + inReaderSpec_ = cmdLine.getOptionValue("inputreader"); + mapDebugSpec_ = cmdLine.getOptionValue("mapdebug"); reduceDebugSpec_ = cmdLine.getOptionValue("reducedebug"); ioSpec_ = cmdLine.getOptionValue("io"); - - String[] car = cmdLine.getOptionValues("cacheArchive"); + + String[] car = cmdLine.getOptionValues("cacheArchive"); if (null != car && car.length > 0){ LOG.warn("-cacheArchive option is deprecated, please use -archives instead."); for(String s : car){ - cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s; + cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s; } } - String[] caf = cmdLine.getOptionValues("cacheFile"); + String[] caf = cmdLine.getOptionValues("cacheFile"); if (null != caf && caf.length > 0){ LOG.warn("-cacheFile option is deprecated, please use -files instead."); for(String s : caf){ - cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s; + cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s; } } - - String[] jobconf = cmdLine.getOptionValues("jobconf"); + + String[] jobconf = cmdLine.getOptionValues("jobconf"); if (null != jobconf && jobconf.length > 0){ LOG.warn("-jobconf option is deprecated, please use -D instead."); for(String s : jobconf){ @@ -341,8 +341,8 @@ void parseArgv() { config_.set(parts[0], parts[1]); } } - - String[] cmd = cmdLine.getOptionValues("cmdenv"); + + String[] cmd = cmdLine.getOptionValues("cmdenv"); if (null != cmd && cmd.length > 0){ for(String s : cmd) { if (addTaskEnvironment_.length() > 0) { @@ -361,8 +361,8 @@ protected void msg(String msg) { System.out.println("STREAM: " + msg); } } - - private Option createOption(String name, String desc, + + private Option createOption(String name, String desc, String argName, int max, boolean required){ return OptionBuilder .withArgName(argName) @@ -371,87 +371,87 @@ private Option createOption(String name, String desc, .isRequired(required) .create(name); } - + private Option createBoolOption(String name, String desc){ return 
OptionBuilder.withDescription(desc).create(name); } - - private void validate(final List values) + + private void validate(final List values) throws IllegalArgumentException { for (String file : values) { - File f = new File(file); + File f = new File(file); if (!f.canRead()) { - fail("File: " + f.getAbsolutePath() - + " does not exist, or is not readable."); + fail("File: " + f.getAbsolutePath() + + " does not exist, or is not readable."); } } } - + private void setupOptions(){ // input and output are not required for -info and -help options, // though they are required for streaming job to be run. - Option input = createOption("input", - "DFS input file(s) for the Map step", - "path", - Integer.MAX_VALUE, - false); - - Option output = createOption("output", - "DFS output directory for the Reduce step", - "path", 1, false); - Option mapper = createOption("mapper", + Option input = createOption("input", + "DFS input file(s) for the Map step", + "path", + Integer.MAX_VALUE, + false); + + Option output = createOption("output", + "DFS output directory for the Reduce step", + "path", 1, false); + Option mapper = createOption("mapper", "The streaming command to run", "cmd", 1, false); - Option combiner = createOption("combiner", + Option combiner = createOption("combiner", "The streaming command to run", "cmd", 1, false); - // reducer could be NONE - Option reducer = createOption("reducer", - "The streaming command to run", "cmd", 1, false); - Option file = createOption("file", - "File to be shipped in the Job jar file", - "file", Integer.MAX_VALUE, false); - Option dfs = createOption("dfs", - "Optional. Override DFS configuration", "|local", 1, false); - Option additionalconfspec = createOption("additionalconfspec", + // reducer could be NONE + Option reducer = createOption("reducer", + "The streaming command to run", "cmd", 1, false); + Option file = createOption("file", + "File to be shipped in the Job jar file", + "file", Integer.MAX_VALUE, false); + Option dfs = createOption("dfs", + "Optional. Override DFS configuration", "|local", 1, false); + Option additionalconfspec = createOption("additionalconfspec", "Optional.", "spec", 1, false); - Option inputformat = createOption("inputformat", + Option inputformat = createOption("inputformat", "Optional.", "spec", 1, false); - Option outputformat = createOption("outputformat", + Option outputformat = createOption("outputformat", "Optional.", "spec", 1, false); - Option partitioner = createOption("partitioner", + Option partitioner = createOption("partitioner", "Optional.", "spec", 1, false); - Option numReduceTasks = createOption("numReduceTasks", + Option numReduceTasks = createOption("numReduceTasks", "Optional.", "spec",1, false ); - Option inputreader = createOption("inputreader", + Option inputreader = createOption("inputreader", "Optional.", "spec", 1, false); Option mapDebug = createOption("mapdebug", "Optional.", "spec", 1, false); Option reduceDebug = createOption("reducedebug", "Optional", "spec",1, false); - Option jobconf = - createOption("jobconf", - "(n=v) Optional. Add or override a JobConf property.", + Option jobconf = + createOption("jobconf", + "(n=v) Optional. 
Add or override a JobConf property.", "spec", 1, false); - - Option cmdenv = - createOption("cmdenv", "(n=v) Pass env.var to streaming commands.", + + Option cmdenv = + createOption("cmdenv", "(n=v) Pass env.var to streaming commands.", "spec", 1, false); - Option cacheFile = createOption("cacheFile", + Option cacheFile = createOption("cacheFile", "File name URI", "fileNameURI", Integer.MAX_VALUE, false); - Option cacheArchive = createOption("cacheArchive", + Option cacheArchive = createOption("cacheArchive", "File name URI", "fileNameURI", Integer.MAX_VALUE, false); Option io = createOption("io", "Optional.", "spec", 1, false); - + // boolean properties - - Option background = createBoolOption("background", "Submit the job and don't wait till it completes."); - Option verbose = createBoolOption("verbose", "print verbose output"); - Option info = createBoolOption("info", "print verbose output"); - Option help = createBoolOption("help", "print this help message"); - Option debug = createBoolOption("debug", "print debug output"); + + Option background = createBoolOption("background", "Submit the job and don't wait till it completes."); + Option verbose = createBoolOption("verbose", "print verbose output"); + Option info = createBoolOption("info", "print verbose output"); + Option help = createBoolOption("help", "print this help message"); + Option debug = createBoolOption("debug", "print debug output"); Option lazyOutput = createBoolOption("lazyOutput", "create outputs lazily"); - + allOptions = new Options(). addOption(input). addOption(output). @@ -490,9 +490,9 @@ private void printUsage(boolean detailed) { System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar" + " [options]"); System.out.println("Options:"); - System.out.println(" -input DFS input file(s) for the Map" + System.out.println(" -input DFS input file(s) for the Map" + " step."); - System.out.println(" -output DFS output directory for the" + System.out.println(" -output DFS output directory for the" + " Reduce step."); System.out.println(" -mapper Optional. Command" + " to be run as mapper."); @@ -501,7 +501,7 @@ private void printUsage(boolean detailed) { System.out.println(" -reducer Optional. Command" + " to be run as reducer."); System.out.println(" -file Optional. File/dir to be " - + "shipped in the Job jar file.\n" + + + "shipped in the Job jar file.\n" + " Deprecated. 
Use generic option \"-files\" instead."); System.out.println(" -inputformat \n" @@ -533,7 +533,7 @@ private void printUsage(boolean detailed) { GenericOptionsParser.printGenericCommandUsage(System.out); if (!detailed) { - System.out.println(); + System.out.println(); System.out.println("For more details about these options:"); System.out.println("Use " + "$HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar -info"); @@ -592,7 +592,7 @@ private void printUsage(boolean detailed) { System.out.println(" -D " + MRConfig.LOCAL_DIR + "=/tmp/local"); System.out.println(" -D " + JTConfig.JT_SYSTEM_DIR + "=/tmp/system"); System.out.println(" -D " + MRConfig.TEMP_DIR + "=/tmp/temp"); - System.out.println("To treat tasks with non-zero exit status as SUCCEDED:"); + System.out.println("To treat tasks with non-zero exit status as SUCCEDED:"); System.out.println(" -D stream.non.zero.exit.is.failure=false"); System.out.println("Use a custom hadoop streaming build along with standard" + " hadoop install:"); @@ -621,7 +621,7 @@ private void printUsage(boolean detailed) { System.out.println(" daily logs for days in month 2006-04"); } - public void fail(String message) { + public void fail(String message) { System.err.println(message); System.err.println("Try -help for more information"); throw new IllegalArgumentException(message); @@ -659,7 +659,7 @@ protected String packageJobJar() throws IOException { // $HADOOP_PREFIX/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar // where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_PREFIX String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir - + if (runtimeClasses == null) { runtimeClasses = StreamUtil.findInClasspath(StreamJob.class.getName()); } @@ -700,7 +700,7 @@ protected String packageJobJar() throws IOException { builder.merge(packageFiles_, unjarFiles, jobJarName); return jobJarName; } - + /** * get the uris of all the files/caches */ @@ -710,7 +710,7 @@ protected void getURIs(String lcacheArchives, String lcacheFiles) { fileURIs = StringUtils.stringToURI(files); archiveURIs = StringUtils.stringToURI(archives); } - + protected void setJobConf() throws IOException { if (additionalConfSpec_ != null) { LOG.warn("-additionalconfspec option is deprecated, please use -conf instead."); @@ -719,15 +719,15 @@ protected void setJobConf() throws IOException { // general MapRed job properties jobConf_ = new JobConf(config_, StreamJob.class); - + // All streaming jobs get the task timeout value // from the configuration settings. // The correct FS must be set before this is called! - // (to resolve local vs. dfs drive letter differences) + // (to resolve local vs. 
dfs drive letter differences) // (mapreduce.job.working.dir will be lazily initialized ONCE and depends on FS) for (int i = 0; i < inputSpecs_.size(); i++) { - FileInputFormat.addInputPaths(jobConf_, + FileInputFormat.addInputPaths(jobConf_, (String) inputSpecs_.get(i)); } @@ -773,7 +773,7 @@ protected void setJobConf() throws IOException { fail("-inputformat : class not found : " + inputFormatSpec_); } } - } + } if (fmt == null) { fmt = StreamInputFormat.class; } @@ -786,20 +786,20 @@ protected void setJobConf() throws IOException { jobConf_.set("stream.reduce.input", ioSpec_); jobConf_.set("stream.reduce.output", ioSpec_); } - - Class idResolverClass = + + Class idResolverClass = jobConf_.getClass("stream.io.identifier.resolver.class", IdentifierResolver.class, IdentifierResolver.class); IdentifierResolver idResolver = ReflectionUtils.newInstance(idResolverClass, jobConf_); - + idResolver.resolve(jobConf_.get("stream.map.input", IdentifierResolver.TEXT_ID)); jobConf_.setClass("stream.map.input.writer.class", idResolver.getInputWriterClass(), InputWriter.class); - + idResolver.resolve(jobConf_.get("stream.reduce.input", IdentifierResolver.TEXT_ID)); jobConf_.setClass("stream.reduce.input.writer.class", idResolver.getInputWriterClass(), InputWriter.class); - + jobConf_.set("stream.addenvironment", addTaskEnvironment_); boolean isMapperACommand = false; @@ -811,7 +811,7 @@ protected void setJobConf() throws IOException { isMapperACommand = true; jobConf_.setMapperClass(PipeMapper.class); jobConf_.setMapRunnerClass(PipeMapRunner.class); - jobConf_.set("stream.map.streamprocessor", + jobConf_.set("stream.map.streamprocessor", URLEncoder.encode(mapCmd_, "UTF-8")); } } @@ -900,7 +900,7 @@ protected void setJobConf() throws IOException { jobConf_.set(k, v); } } - + FileOutputFormat.setOutputPath(jobConf_, new Path(output_)); fmt = null; if (outputFormatSpec_!= null) { @@ -928,7 +928,7 @@ protected void setJobConf() throws IOException { fail("-partitioner : class not found : " + partitionerSpec_); } } - + if(mapDebugSpec_ != null){ jobConf_.setMapDebugScript(mapDebugSpec_); } @@ -942,7 +942,7 @@ protected void setJobConf() throws IOException { if (jar_ != null) { jobConf_.setJar(jar_); } - + if ((cacheArchives != null) || (cacheFiles != null)){ getURIs(cacheArchives, cacheFiles); boolean b = DistributedCache.checkURIs(fileURIs, archiveURIs); @@ -955,11 +955,11 @@ protected void setJobConf() throws IOException { DistributedCache.setCacheArchives(archiveURIs, jobConf_); if (cacheFiles != null) DistributedCache.setCacheFiles(fileURIs, jobConf_); - + if (verbose_) { listJobConfProperties(); } - + msg("submitting to jobconf: " + getJobTrackerHostPort()); } @@ -1013,7 +1013,7 @@ public int submitAndMonitorJob() throws IOException { LOG.error("Error launching job , Invalid job conf : " + je.getMessage()); return 3; } catch(FileAlreadyExistsException fae) { - LOG.error("Error launching job , Output path already exists : " + LOG.error("Error launching job , Output path already exists : " + fae.getMessage()); return 4; } catch(IOException ioe) { @@ -1047,9 +1047,9 @@ public int submitAndMonitorJob() throws IOException { protected ArrayList inputSpecs_ = new ArrayList(); protected TreeSet seenPrimary_ = new TreeSet(); protected boolean hasSimpleInputSpecs_; - protected ArrayList packageFiles_ = new ArrayList(); + protected ArrayList packageFiles_ = new ArrayList(); protected ArrayList shippedCanonFiles_ = new ArrayList(); - //protected TreeMap userJobConfProps_ = new TreeMap(); + //protected TreeMap 
userJobConfProps_ = new TreeMap(); protected String output_; protected String mapCmd_; protected String comCmd_; diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamKeyValUtil.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamKeyValUtil.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/IdentifierResolver.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/InputWriter.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/InputWriter.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/InputWriter.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/OutputReader.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/OutputReader.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/OutputReader.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesInputWriter.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/RawBytesOutputReader.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextInputWriter.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TextOutputReader.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesInputWriter.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TypedBytesOutputReader.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/package.html
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/Type.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/Type.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/Type.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesInput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesOutput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordInput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesRecordOutput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritable.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableInput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/TypedBytesWritableOutput.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/typedbytes/package.html
rename to hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html
diff --git a/hadoop-mapreduce-project/src/test/mapred/testjar/ClassWithNoPackage.java b/hadoop-tools/hadoop-streaming/src/test/java/ClassWithNoPackage.java
similarity index 100%
rename from hadoop-mapreduce-project/src/test/mapred/testjar/ClassWithNoPackage.java
rename to hadoop-tools/hadoop-streaming/src/test/java/ClassWithNoPackage.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/DelayEchoApp.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/DelayEchoApp.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/DelayEchoApp.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/FailApp.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/FailApp.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/FailApp.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesMapApp.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/RawBytesReduceApp.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StderrApp.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StderrApp.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StreamAggregate.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StreamAggregate.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/StreamAggregate.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestAutoInputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestAutoInputFormat.java
rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestAutoInputFormat.java
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestClassWithNoPackage.java similarity index 83% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestClassWithNoPackage.java index b0b0d4e93f..4d5cf61af6 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestClassWithNoPackage.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestClassWithNoPackage.java @@ -21,6 +21,8 @@ import java.net.URL; import java.net.URLClassLoader; import java.net.MalformedURLException; + +import org.apache.hadoop.util.JarFinder; import org.junit.Test; import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; @@ -31,15 +33,16 @@ */ public class TestClassWithNoPackage { - private final String NAME = "ClassWithNoPackage"; - private final String JAR = "build/test/mapred/testjar/testjob.jar"; - @Test - public void testGoodClassOrNull() throws MalformedURLException { + public void testGoodClassOrNull() throws Exception { + String NAME = "ClassWithNoPackage"; + ClassLoader cl = TestClassWithNoPackage.class.getClassLoader(); + String JAR = JarFinder.getJar(cl.loadClass(NAME)); + // Add testjob jar file to classpath. Configuration conf = new Configuration(); - conf.setClassLoader(new URLClassLoader(new URL[]{new URL("file", null, JAR)}, - conf.getClassLoader())); + conf.setClassLoader(new URLClassLoader(new URL[]{new URL("file", null, JAR)}, + null)); // Get class with no package name. String defaultPackage = this.getClass().getPackage().getName(); Class c = StreamUtil.goodClassOrNull(conf, NAME, defaultPackage); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestDumpTypedBytes.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestDumpTypedBytes.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestDumpTypedBytes.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java similarity index 86% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java index b9c720de12..3b4a9f3f6a 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestFileArgs.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java @@ -20,7 +20,6 @@ import java.io.DataOutputStream; import java.io.IOException; -import java.net.URI; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -45,7 +44,7 @@ public class TestFileArgs extends TestStreaming private Configuration conf = null; private static final String EXPECTED_OUTPUT = - "job.jar\t\nsidefile\t\ntmp\t\n"; + "job.jar\t\nsidefile\t\n"; private static final String LS_PATH = "/bin/ls"; @@ -57,8 +56,8 @@ public TestFileArgs() throws IOException fileSys = dfs.getFileSystem(); namenode = 
fileSys.getUri().getAuthority(); mr = new MiniMRCluster(1, namenode, 1); - strJobTracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.getJobTrackerPort(); - strNamenode = "fs.default.name=hdfs://" + namenode; + strJobTracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS); + strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name"); map = LS_PATH; FileSystem.setDefaultUri(conf, "hdfs://" + namenode); @@ -69,7 +68,7 @@ public TestFileArgs() throws IOException public void setUp() throws IOException { // Set up side file FileSystem localFs = FileSystem.getLocal(conf); - DataOutputStream dos = localFs.create(new Path("sidefile")); + DataOutputStream dos = localFs.create(new Path("target/sidefile")); dos.write("hello world\n".getBytes("UTF-8")); dos.close(); @@ -102,13 +101,18 @@ protected Configuration getConf() { @Override protected String[] genArgs() { args.add("-file"); - args.add(new java.io.File("sidefile").getAbsolutePath()); + args.add(new java.io.File("target/sidefile").getAbsolutePath()); args.add("-numReduceTasks"); args.add("0"); args.add("-jobconf"); args.add(strNamenode); args.add("-jobconf"); args.add(strJobTracker); + args.add("-jobconf"); + args.add("mapred.jar=" + STREAMING_JAR); + args.add("-jobconf"); + args.add("mapreduce.framework.name=yarn"); + args.add("-verbose"); return super.genArgs(); } } diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestLoadTypedBytes.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestLoadTypedBytes.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestLoadTypedBytes.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java similarity index 90% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java index a251b19acb..105bfb1397 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java @@ -45,7 +45,7 @@ import static org.junit.Assert.*; /** - * This class tests cacheArchive option of streaming + * This class tests cacheArchive option of streaming * The test case creates 2 archive files, ships it with hadoop * streaming and compares the output with expected output */ @@ -75,13 +75,13 @@ public TestMultipleArchiveFiles() throws Exception { CACHE_FILE_2 = new File("cacheArchive2"); input = "HADOOP"; expectedOutput = "HADOOP\t\nHADOOP\t\n"; - conf = new 
Configuration(); - dfs = new MiniDFSCluster(conf, 1, true, null); + conf = new Configuration(); + dfs = new MiniDFSCluster(conf, 1, true, null); fileSys = dfs.getFileSystem(); namenode = fileSys.getUri().getAuthority(); - mr = new MiniMRCluster(1, namenode, 3); - strJobTracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.getJobTrackerPort(); - strNamenode = "fs.default.name=" + namenode; + mr = new MiniMRCluster(1, namenode, 1); + strJobTracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS); + strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name"); map = "xargs cat"; reduce = "cat"; @@ -92,7 +92,7 @@ protected void setInputOutput() { inputFile = INPUT_FILE; outDir = OUTPUT_DIR; } - + protected void createInput() throws IOException { fileSys.delete(new Path(INPUT_DIR), true); @@ -100,7 +100,7 @@ protected void createInput() throws IOException String inputFileString = "symlink1/cacheArchive1\nsymlink2/cacheArchive2"; dos.write(inputFileString.getBytes("UTF-8")); dos.close(); - + DataOutputStream out = fileSys.create(new Path(CACHE_ARCHIVE_1.toString())); ZipOutputStream zos = new ZipOutputStream(out); ZipEntry ze = new ZipEntry(CACHE_FILE_1.toString()); @@ -133,6 +133,10 @@ protected String[] genArgs() { args.add(strNamenode); args.add("-jobconf"); args.add(strJobTracker); + args.add("-jobconf"); + args.add("mapred.jar=" + STREAMING_JAR); + args.add("-jobconf"); + args.add("mapreduce.framework.name=yarn"); return super.genArgs(); } @@ -144,6 +148,6 @@ protected void checkOutput() throws IOException { LOG.info("Adding output from file: " + fileList[i]); output.append(StreamUtil.slurpHadoop(fileList[i], fileSys)); } - assertEquals(expectedOutput, output.toString()); + assertOutput(expectedOutput, output.toString()); } } diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleCachefiles.java similarity index 95% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleCachefiles.java index 0bc2832d17..94e0c4222a 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleCachefiles.java @@ -73,10 +73,8 @@ public void testMultipleCachefiles() throws Exception String namenode = fileSys.getUri().toString(); mr = new MiniMRCluster(1, namenode, 3); - // During tests, the default Configuration will use a local mapred - // So don't specify -config or -cluster - String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.getJobTrackerPort(); - String strNamenode = "fs.default.name=" + namenode; + String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS); + String strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name"); String argv[] = new String[] { "-input", INPUT_FILE, "-output", OUTPUT_DIR, @@ -98,7 +96,9 @@ public void testMultipleCachefiles() throws Exception conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")), "-cacheFile", fileSys.getUri() + CACHE_FILE + "#" + mapString, - "-cacheFile", fileSys.getUri() + CACHE_FILE_2 + "#" + mapString2 + 
"-cacheFile", fileSys.getUri() + CACHE_FILE_2 + "#" + mapString2, + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; fileSys.delete(new Path(OUTPUT_DIR), true); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java similarity index 96% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java index df2422ce91..7621fd1fe8 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestRawBytesStreaming.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java @@ -31,8 +31,8 @@ public class TestRawBytesStreaming { - protected File INPUT_FILE = new File("input.txt"); - protected File OUTPUT_DIR = new File("out"); + protected File INPUT_FILE = new File("target/input.txt"); + protected File OUTPUT_DIR = new File("target/out"); protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n"; protected String map = UtilTest.makeJavaCommand(RawBytesMapApp.class, new String[]{"."}); protected String reduce = UtilTest.makeJavaCommand(RawBytesReduceApp.class, new String[0]); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamAggregate.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamDataProtocol.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamDataProtocol.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamDataProtocol.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamJob.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamJob.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamReduceNone.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamReduceNone.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamReduceNone.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlMultipleRecords.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java similarity index 97% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java index fa067ae132..7a1c6f6106 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamXmlRecordReader.java @@ -30,7 +30,7 @@ public class TestStreamXmlRecordReader extends TestStreaming { public TestStreamXmlRecordReader() throws IOException { - INPUT_FILE = new File("input.xml"); + INPUT_FILE = new File("target/input.xml"); input = "\t\nroses.are.red\t\nviolets.are.blue\t\n" + "bunnies.are.pink\t\n\t\n"; map = "cat"; diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreaming.java similarity index 83% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreaming.java index 809baaadc3..98ed1a299e 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreaming.java @@ -20,7 +20,11 @@ import java.io.*; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import org.apache.hadoop.util.JarFinder; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -37,6 +41,8 @@ public class TestStreaming { + public static final String STREAMING_JAR = JarFinder.getJar(StreamJob.class); + // "map" command: grep -E (red|green|blue) // reduce command: uniq protected File TEST_DIR; @@ -60,7 +66,7 @@ public TestStreaming() throws IOException UtilTest utilTest = new UtilTest(getClass().getName()); utilTest.checkUserDir(); utilTest.redirectIfAntJunit(); - TEST_DIR = new File(getClass().getName()).getAbsoluteFile(); + TEST_DIR = new File("target/TestStreaming").getAbsoluteFile(); OUTPUT_DIR = new File(TEST_DIR, "out"); INPUT_FILE = new File(TEST_DIR, "input.txt"); } @@ -129,7 +135,18 @@ protected void checkOutput() throws IOException { fs.delete(outPath, true); System.err.println("outEx1=" + getExpectedOutput()); System.err.println(" out1=" + output); - assertEquals(getExpectedOutput(), output); + assertOutput(getExpectedOutput(), output); + } + + protected void assertOutput(String expectedOutput, String output) throws IOException { + String[] words = expectedOutput.split("\t\n"); + Set expectedWords = new HashSet(Arrays.asList(words)); + words = output.split("\t\n"); + Set returnedWords = new 
HashSet(Arrays.asList(words)); +// PrintWriter writer = new PrintWriter(new OutputStreamWriter(new FileOutputStream(new File("/tmp/tucu.txt"), true)), true); +// writer.println("** Expected: " + expectedOutput); +// writer.println("** Output : " + output); + assertTrue(returnedWords.containsAll(expectedWords)); } /** diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBackground.java similarity index 97% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBackground.java index 1a922830e2..c18c283dd7 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBackground.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBackground.java @@ -33,7 +33,7 @@ * with 10 seconds delay is submited. */ public class TestStreamingBackground { - protected File TEST_DIR = new File("TestStreamingBackground") + protected File TEST_DIR = new File("target/TestStreamingBackground") .getAbsoluteFile(); protected File INPUT_FILE = new File(TEST_DIR, "input.txt"); protected File OUTPUT_DIR = new File(TEST_DIR, "out"); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java similarity index 97% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java index 858fc71ed6..be10235dc6 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingBadRecords.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java @@ -185,7 +185,9 @@ public void testSkip() throws Exception { "-jobconf", "mapreduce.jobtracker.http.address=" +clusterConf.get(JTConfig.JT_HTTP_ADDRESS), "-jobconf", "mapreduce.task.files.preserve.failedtasks=true", - "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp") + "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"), + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; StreamJob job = new StreamJob(args, false); job.go(); @@ -219,7 +221,9 @@ public void testNarrowDown() throws Exception { "-jobconf", "mapreduce.jobtracker.http.address=" +clusterConf.get(JTConfig.JT_HTTP_ADDRESS), "-jobconf", "mapreduce.task.files.preserve.failedtasks=true", - "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp") + "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"), + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; StreamJob job = new StreamJob(args, false); job.go(); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCombiner.java similarity index 100% rename from 
hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCombiner.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCombiner.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCounters.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingCounters.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingCounters.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingExitStatus.java similarity index 98% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingExitStatus.java index f234305506..411f740fcd 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingExitStatus.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingExitStatus.java @@ -36,7 +36,7 @@ public class TestStreamingExitStatus { protected File TEST_DIR = - new File("TestStreamingExitStatus").getAbsoluteFile(); + new File("target/TestStreamingExitStatus").getAbsoluteFile(); protected File INPUT_FILE = new File(TEST_DIR, "input.txt"); protected File OUTPUT_DIR = new File(TEST_DIR, "out"); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingFailure.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingFailure.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingFailure.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingKeyValue.java similarity index 97% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingKeyValue.java index ea0bdb440d..444355f4fb 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingKeyValue.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingKeyValue.java @@ -32,8 +32,8 @@ */ public class TestStreamingKeyValue { - protected File INPUT_FILE = new File("input.txt"); - protected File OUTPUT_DIR = new File("stream_out"); + protected File INPUT_FILE = new File("target/input.txt"); + protected File OUTPUT_DIR = new File("target/stream_out"); // First line of input has 'key' 'tab' 'value' // Second line of input starts with a tab character. // So, it has empty key and the whole line as value. 
diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingOutputKeyValueTypes.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingSeparator.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingSeparator.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingSeparator.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java similarity index 96% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java index dbffb97a5e..b2af40adbf 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStatus.java @@ -146,7 +146,7 @@ protected void createInputAndScript(boolean isEmptyInput, file.close(); } - protected String[] genArgs(int jobtrackerPort, String mapper, String reducer) + protected String[] genArgs(String jobtracker, String mapper, String reducer) { return new String[] { "-input", INPUT_FILE, @@ -157,8 +157,10 @@ protected String[] genArgs(int jobtrackerPort, String mapper, String reducer) "-jobconf", MRJobConfig.NUM_REDUCES + "=1", "-jobconf", MRJobConfig.PRESERVE_FAILED_TASK_FILES + "=true", "-jobconf", "stream.tmpdir=" + new Path(TEST_ROOT_DIR).toUri().getPath(), - "-jobconf", JTConfig.JT_IPC_ADDRESS + "=localhost:"+jobtrackerPort, - "-jobconf", "fs.default.name=file:///" + "-jobconf", JTConfig.JT_IPC_ADDRESS + "="+jobtracker, + "-jobconf", "fs.default.name=file:///", + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; } @@ -250,7 +252,7 @@ private void testStreamJob(boolean isEmptyInput) void runStreamJob(TaskType type, boolean isEmptyInput) throws IOException { boolean mayExit = false; StreamJob job = new StreamJob(genArgs( - mr.getJobTrackerPort(), map, reduce), mayExit); + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS), map, reduce), mayExit); int returnValue = job.go(); assertEquals(0, returnValue); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStderr.java similarity index 95% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStderr.java index d6987c2ed6..b80777968e 100644 --- 
a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStderr.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingStderr.java @@ -89,14 +89,14 @@ public void runStreamJob(String baseName, boolean hasInput, // consumed by Hadoop for tasks that don't have any input. @Test public void testStderrNoInput() throws Exception { - runStreamJob("stderr-pre", false, 10000, 0, 0); + runStreamJob("target/stderr-pre", false, 10000, 0, 0); } // Streaming should continue to read stderr even after all input has // been consumed. @Test public void testStderrAfterOutput() throws Exception { - runStreamJob("stderr-post", false, 0, 0, 10000); + runStreamJob("target/stderr-post", false, 0, 0, 10000); } // This test should produce a task timeout if stderr lines aren't @@ -104,7 +104,7 @@ public void testStderrAfterOutput() throws Exception { // LocalJobRunner supports timeouts. @Test public void testStderrCountsAsProgress() throws Exception { - runStreamJob("stderr-progress", true, 10, 1000, 0); + runStreamJob("target/stderr-progress", true, 10, 1000, 0); } } diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java similarity index 89% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java index 6a871da011..e9a0286cf1 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingTaskLog.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java @@ -39,8 +39,8 @@ */ public class TestStreamingTaskLog { String input = "the dummy input"; - Path inputPath = new Path("inDir"); - Path outputPath = new Path("outDir"); + Path inputPath = new Path("target/inDir"); + Path outputPath = new Path("target/outDir"); String map = null; MiniMRCluster mr = null; FileSystem fs = null; @@ -52,12 +52,14 @@ String[] genArgs() { "-output", outputPath.toString(), "-mapper", map, "-reducer", StreamJob.REDUCE_NONE, - "-jobconf", "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort(), + "-jobconf", "mapred.job.tracker=" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS), "-jobconf", "fs.default.name=" + fs.getUri().toString(), "-jobconf", "mapred.map.tasks=1", "-jobconf", "keep.failed.task.files=true", "-jobconf", "mapreduce.task.userlog.limit.kb=" + USERLOG_LIMIT_KB, - "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp") + "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"), + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; } @@ -92,7 +94,6 @@ public void testStreamingTaskLogWithHadoopCmd() { fs.delete(outputPath, true); assertFalse("output not cleaned up", fs.exists(outputPath)); - mr.waitUntilIdle(); } catch(IOException e) { fail(e.toString()); } finally { @@ -135,9 +136,9 @@ private void runStreamJobAndValidateEnv() throws IOException { // validate environment variables set for the child(script) of java process String env = MapReduceTestUtil.readOutput(outputPath, mr.createJobConf()); long logSize = USERLOG_LIMIT_KB * 1024; - assertTrue("environment set for child is wrong", env.contains("INFO,TLA") - && 
env.contains("-Dhadoop.tasklog.taskid=attempt_") - && env.contains("-Dhadoop.tasklog.totalLogFileSize=" + logSize) - && env.contains("-Dhadoop.tasklog.iscleanup=false")); + assertTrue("environment set for child is wrong", env.contains("INFO,CLA") + && env.contains("-Dyarn.app.mapreduce.container.log.dir=") + && env.contains("-Dyarn.app.mapreduce.container.log.filesize=" + logSize) + && env.contains("-Dlog4j.configuration=")); } } diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java similarity index 94% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java index 166a62372e..cc6a8cdd85 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java @@ -64,8 +64,8 @@ public void testSymLink() throws Exception mr = new MiniMRCluster(1, namenode, 3); // During tests, the default Configuration will use a local mapred // So don't specify -config or -cluster - String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.getJobTrackerPort(); - String strNamenode = "fs.default.name=" + namenode; + String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS); + String strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name"); String argv[] = new String[] { "-input", INPUT_FILE, "-output", OUTPUT_DIR, @@ -86,7 +86,9 @@ public void testSymLink() throws Exception "-Dbuild.test=" + System.getProperty("build.test") + " " + conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")), - "-cacheFile", fileSys.getUri() + CACHE_FILE + "#testlink" + "-cacheFile", fileSys.getUri() + CACHE_FILE + "#testlink", + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; fileSys.delete(new Path(OUTPUT_DIR), true); diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestTypedBytesStreaming.java similarity index 96% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestTypedBytesStreaming.java index a96709a102..05a050cac8 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestTypedBytesStreaming.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestTypedBytesStreaming.java @@ -33,8 +33,8 @@ public class TestTypedBytesStreaming { - protected File INPUT_FILE = new File("input.txt"); - protected File OUTPUT_DIR = new File("out"); + protected File INPUT_FILE = new File("target/input.txt"); + protected File OUTPUT_DIR = new File("target/out"); protected String input = "roses.are.red\nviolets.are.blue\nbunnies.are.pink\n"; protected String map = UtilTest.makeJavaCommand(TypedBytesMapApp.class, new String[]{"."}); protected String reduce = UtilTest.makeJavaCommand(TypedBytesReduceApp.class, new String[0]); diff --git 
a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java similarity index 88% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java index 068319cadf..9d35d7ae68 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestUlimit.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java @@ -43,8 +43,8 @@ */ public class TestUlimit { String input = "the dummy input"; - Path inputPath = new Path("/testing/in"); - Path outputPath = new Path("/testing/out"); + Path inputPath = new Path("target/testing/in"); + Path outputPath = new Path("target/testing/out"); String map = null; MiniDFSCluster dfs = null; MiniMRCluster mr = null; @@ -52,6 +52,8 @@ public class TestUlimit { private static String SET_MEMORY_LIMIT = "786432"; // 768MB String[] genArgs(String memLimit) { + String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS); + String strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name"); return new String[] { "-input", inputPath.toString(), "-output", outputPath.toString(), @@ -60,12 +62,12 @@ String[] genArgs(String memLimit) { "-numReduceTasks", "0", "-jobconf", MRJobConfig.NUM_MAPS + "=1", "-jobconf", JobConf.MAPRED_MAP_TASK_ULIMIT + "=" + memLimit, - "-jobconf", JTConfig.JT_IPC_ADDRESS + "=localhost:" + - mr.getJobTrackerPort(), - "-jobconf", "fs.default.name=" + "hdfs://localhost:" - + dfs.getNameNodePort(), + "-jobconf", strNamenode, + "-jobconf", strJobtracker, "-jobconf", "stream.tmpdir=" + - System.getProperty("test.build.data","/tmp") + System.getProperty("test.build.data","/tmp"), + "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR, + "-jobconf", "mapreduce.framework.name=yarn" }; } diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrApp.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrApp.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrAppReduce.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrAppReduce.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TrAppReduce.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesMapApp.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesMapApp.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesMapApp.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesReduceApp.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TypedBytesReduceApp.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TypedBytesReduceApp.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UlimitApp.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UlimitApp.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UlimitApp.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UniqApp.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UniqApp.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java similarity index 93% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java index 73e1565496..2766969f6a 100644 --- a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java @@ -66,11 +66,11 @@ public UtilTest(String testName) { } void checkUserDir() { - // trunk/src/contrib/streaming --> trunk/build/contrib/streaming/test/data - if (!userDir_.equals(antTestDir_)) { - // because changes to user.dir are ignored by File static methods. - throw new IllegalStateException("user.dir != test.build.data. The junit Ant task must be forked."); - } +// // trunk/src/contrib/streaming --> trunk/build/contrib/streaming/test/data +// if (!userDir_.equals(antTestDir_)) { +// // because changes to user.dir are ignored by File static methods. +// throw new IllegalStateException("user.dir != test.build.data. 
The junit Ant task must be forked."); +// } } void redirectIfAntJunit() throws IOException diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/ValueCountReduce.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ValueCountReduce.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/ValueCountReduce.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestIO.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestIO.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestIO.java diff --git a/hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java similarity index 100% rename from hadoop-mapreduce-project/src/contrib/streaming/src/test/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java rename to hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 7f82ab1a67..2347ec3e0e 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -21,13 +21,14 @@ ../hadoop-project org.apache.hadoop - hadoop-tools-project + hadoop-tools 0.24.0-SNAPSHOT Apache Hadoop Tools Apache Hadoop Tools pom + hadoop-streaming