diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java index fa54c5459f..9f8d6760b5 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIR3 Response */ @@ -56,12 +58,11 @@ long getCookie() { } public static class DirList3 { - final Entry3 entries[]; + final List<Entry3> entries; final boolean eof; public DirList3(Entry3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } } @@ -102,12 +103,11 @@ public XDR send(XDR xdr, int xid) { if (getStatus() == Nfs3Status.NFS3_OK) { xdr.writeLongAsHyper(cookieVerf); - Entry3[] f = dirList.entries; - for (int i = 0; i < f.length; i++) { + for (Entry3 e : dirList.entries) { xdr.writeBoolean(true); // Value follows - xdr.writeLongAsHyper(f[i].getFileId()); - xdr.writeString(f[i].getName()); - xdr.writeLongAsHyper(f[i].getCookie()); + xdr.writeLongAsHyper(e.getFileId()); + xdr.writeString(e.getName()); + xdr.writeLongAsHyper(e.getCookie()); } xdr.writeBoolean(false); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java index 77794cf48a..6b41cb27f7 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java @@ -17,13 +17,15 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIRPLUS3 Response */ @@ -60,16 +62,15 @@ void seralize(XDR xdr) { } public static class DirListPlus3 { - EntryPlus3 entries[]; + List<EntryPlus3> entries; boolean eof; public DirListPlus3(EntryPlus3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } - EntryPlus3[] getEntries() { + List<EntryPlus3> getEntries() { return entries; } @@ -101,10 +102,9 @@ public XDR send(XDR out, int xid) { if (getStatus() == Nfs3Status.NFS3_OK) { out.writeLongAsHyper(cookieVerf); - EntryPlus3[] f = dirListPlus.getEntries(); - for (int i = 0; i < f.length; i++) { + for (EntryPlus3 f : dirListPlus.getEntries()) { out.writeBoolean(true); // next - f[i].seralize(out); + f.seralize(out); } out.writeBoolean(false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 38da72197a..242b2b256a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -293,6 +293,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli) + 
HDFS-5107 Fix array copy error in Readdir and Readdirplus responses + (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may